diff --git a/v1.26/alvistack-vagrant-kubernetes/PRODUCT.yaml b/v1.26/alvistack-vagrant-kubernetes/PRODUCT.yaml index 686995428e..96f9f7220b 100644 --- a/v1.26/alvistack-vagrant-kubernetes/PRODUCT.yaml +++ b/v1.26/alvistack-vagrant-kubernetes/PRODUCT.yaml @@ -1,6 +1,6 @@ vendor: AlviStack name: AlviStack - Vagrant Box Packaging for Kubernetes -version: 20230726.1.1 +version: 20230824.0.0 website_url: https://github.com/alvistack/vagrant-kubernetes documentation_url: https://github.com/alvistack/ansible-collection-kubernetes/tree/master/docs product_logo_url: https://raw.githubusercontent.com/alvistack/ansible-collection-kubernetes/master/docs/images/alvistack/stacked/color/alvistack-stacked-color.svg diff --git a/v1.26/alvistack-vagrant-kubernetes/README.md b/v1.26/alvistack-vagrant-kubernetes/README.md index ca6382b8d7..e830b3f8ab 100644 --- a/v1.26/alvistack-vagrant-kubernetes/README.md +++ b/v1.26/alvistack-vagrant-kubernetes/README.md @@ -100,9 +100,9 @@ Check result: root@kube01:~# kubectl get node NAME STATUS ROLES AGE VERSION - kube01 Ready control-plane 3h41m v1.26.7 - kube02 Ready control-plane 3h40m v1.26.7 - kube03 Ready 3h36m v1.26.7 + kube01 Ready control-plane 3h41m v1.26.8 + kube02 Ready control-plane 3h40m v1.26.8 + kube03 Ready 3h36m v1.26.8 root@kube01:~# kubectl get pod --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE diff --git a/v1.26/alvistack-vagrant-kubernetes/e2e.log b/v1.26/alvistack-vagrant-kubernetes/e2e.log index 6d5313adaa..0c2df6f5ee 100644 --- a/v1.26/alvistack-vagrant-kubernetes/e2e.log +++ b/v1.26/alvistack-vagrant-kubernetes/e2e.log @@ -1,8 +1,8 @@ -I0729 15:30:02.173268 13 e2e.go:126] Starting e2e run "d0d188fc-d094-4f2b-8739-c618e26462b8" on Ginkgo node 1 -Jul 29 15:30:02.209: INFO: Enabling in-tree volume drivers +I0824 11:39:13.004980 14 e2e.go:126] Starting e2e run "e37f2036-3a54-4653-ada1-c01489d8d1f1" on Ginkgo node 1 +Aug 24 11:39:13.051: INFO: Enabling in-tree volume drivers Running Suite: Kubernetes e2e suite - /usr/local/bin ==================================================== -Random Seed: 1690644601 - will randomize all specs +Random Seed: 1692877152 - will randomize all specs Will run 368 of 7069 specs ------------------------------ @@ -10,2610 +10,2091 @@ Will run 368 of 7069 specs test/e2e/e2e.go:77 [SynchronizedBeforeSuite] TOP-LEVEL test/e2e/e2e.go:77 -Jul 29 15:30:02.454: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 15:30:02.466: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable -Jul 29 15:30:02.498: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready -Jul 29 15:30:02.566: INFO: 20 / 20 pods in namespace 'kube-system' are running and ready (0 seconds elapsed) -Jul 29 15:30:02.567: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready. 
-Jul 29 15:30:02.567: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start -Jul 29 15:30:02.581: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'cilium' (0 seconds elapsed) -Jul 29 15:30:02.581: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'cilium-node-init' (0 seconds elapsed) -Jul 29 15:30:02.581: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) -Jul 29 15:30:02.582: INFO: e2e test version: v1.26.7 -Jul 29 15:30:02.583: INFO: kube-apiserver version: v1.26.7 +Aug 24 11:39:13.273: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:39:13.278: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable +Aug 24 11:39:13.307: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready +Aug 24 11:39:13.359: INFO: 20 / 20 pods in namespace 'kube-system' are running and ready (0 seconds elapsed) +Aug 24 11:39:13.360: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready. +Aug 24 11:39:13.360: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start +Aug 24 11:39:13.373: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'cilium' (0 seconds elapsed) +Aug 24 11:39:13.373: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'cilium-node-init' (0 seconds elapsed) +Aug 24 11:39:13.373: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) +Aug 24 11:39:13.373: INFO: e2e test version: v1.26.8 +Aug 24 11:39:13.374: INFO: kube-apiserver version: v1.26.8 [SynchronizedBeforeSuite] TOP-LEVEL test/e2e/e2e.go:77 -Jul 29 15:30:02.583: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 15:30:02.591: INFO: Cluster IP family: ipv4 +Aug 24 11:39:13.375: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:39:13.384: INFO: Cluster IP family: ipv4 ------------------------------ -[SynchronizedBeforeSuite] PASSED [0.139 seconds] +[SynchronizedBeforeSuite] PASSED [0.110 seconds] [SynchronizedBeforeSuite] test/e2e/e2e.go:77 Begin Captured GinkgoWriter Output >> [SynchronizedBeforeSuite] TOP-LEVEL test/e2e/e2e.go:77 - Jul 29 15:30:02.454: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 15:30:02.466: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable - Jul 29 15:30:02.498: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready - Jul 29 15:30:02.566: INFO: 20 / 20 pods in namespace 'kube-system' are running and ready (0 seconds elapsed) - Jul 29 15:30:02.567: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready. 
- Jul 29 15:30:02.567: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start - Jul 29 15:30:02.581: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'cilium' (0 seconds elapsed) - Jul 29 15:30:02.581: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'cilium-node-init' (0 seconds elapsed) - Jul 29 15:30:02.581: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) - Jul 29 15:30:02.582: INFO: e2e test version: v1.26.7 - Jul 29 15:30:02.583: INFO: kube-apiserver version: v1.26.7 + Aug 24 11:39:13.273: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:39:13.278: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable + Aug 24 11:39:13.307: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready + Aug 24 11:39:13.359: INFO: 20 / 20 pods in namespace 'kube-system' are running and ready (0 seconds elapsed) + Aug 24 11:39:13.360: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready. + Aug 24 11:39:13.360: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start + Aug 24 11:39:13.373: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'cilium' (0 seconds elapsed) + Aug 24 11:39:13.373: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'cilium-node-init' (0 seconds elapsed) + Aug 24 11:39:13.373: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) + Aug 24 11:39:13.373: INFO: e2e test version: v1.26.8 + Aug 24 11:39:13.374: INFO: kube-apiserver version: v1.26.8 [SynchronizedBeforeSuite] TOP-LEVEL test/e2e/e2e.go:77 - Jul 29 15:30:02.583: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 15:30:02.591: INFO: Cluster IP family: ipv4 + Aug 24 11:39:13.375: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:39:13.384: INFO: Cluster IP family: ipv4 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSS ------------------------------ -[sig-node] Secrets - should be consumable via the environment [NodeConformance] [Conformance] - test/e2e/common/node/secrets.go:95 -[BeforeEach] [sig-node] Secrets +[sig-storage] EmptyDir volumes + should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:207 +[BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:30:02.645 -Jul 29 15:30:02.645: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename secrets 07/29/23 15:30:02.647 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:30:02.684 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:30:02.691 -[BeforeEach] [sig-node] Secrets +STEP: Creating a kubernetes client 08/24/23 11:39:13.435 +Aug 24 11:39:13.436: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 11:39:13.437 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:39:13.473 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:39:13.479 +[BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable via the environment [NodeConformance] [Conformance] - 
test/e2e/common/node/secrets.go:95 -STEP: creating secret secrets-7782/secret-test-064b9847-0b20-48fd-ba3e-27df9a135c8f 07/29/23 15:30:02.7 -STEP: Creating a pod to test consume secrets 07/29/23 15:30:02.713 -Jul 29 15:30:02.731: INFO: Waiting up to 5m0s for pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1" in namespace "secrets-7782" to be "Succeeded or Failed" -Jul 29 15:30:02.742: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1": Phase="Pending", Reason="", readiness=false. Elapsed: 11.571165ms -Jul 29 15:30:04.754: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023135579s -Jul 29 15:30:06.755: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1": Phase="Pending", Reason="", readiness=false. Elapsed: 4.023937519s -Jul 29 15:30:08.752: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.020826842s -STEP: Saw pod success 07/29/23 15:30:08.752 -Jul 29 15:30:08.752: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1" satisfied condition "Succeeded or Failed" -Jul 29 15:30:08.758: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1 container env-test: -STEP: delete the pod 07/29/23 15:30:08.806 -Jul 29 15:30:08.833: INFO: Waiting for pod pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1 to disappear -Jul 29 15:30:08.839: INFO: Pod pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1 no longer exists -[AfterEach] [sig-node] Secrets +[It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:207 +STEP: Creating a pod to test emptydir 0666 on node default medium 08/24/23 11:39:13.485 +Aug 24 11:39:13.510: INFO: Waiting up to 5m0s for pod "pod-9d704f62-4d95-49c5-b380-04509ac78237" in namespace "emptydir-582" to be "Succeeded or Failed" +Aug 24 11:39:13.518: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 6.185248ms +Aug 24 11:39:15.533: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021717467s +Aug 24 11:39:17.526: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 4.014170556s +Aug 24 11:39:19.528: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 6.016760088s +Aug 24 11:39:21.523: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 8.011754703s +Aug 24 11:39:23.525: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 10.013286028s +Aug 24 11:39:25.527: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 12.015187536s +Aug 24 11:39:27.526: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 14.014417955s +Aug 24 11:39:29.527: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 16.01509674s +STEP: Saw pod success 08/24/23 11:39:29.527 +Aug 24 11:39:29.528: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237" satisfied condition "Succeeded or Failed" +Aug 24 11:39:29.534: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-9d704f62-4d95-49c5-b380-04509ac78237 container test-container: +STEP: delete the pod 08/24/23 11:39:29.568 +Aug 24 11:39:29.590: INFO: Waiting for pod pod-9d704f62-4d95-49c5-b380-04509ac78237 to disappear +Aug 24 11:39:29.597: INFO: Pod pod-9d704f62-4d95-49c5-b380-04509ac78237 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 15:30:08.840: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Secrets +Aug 24 11:39:29.598: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Secrets +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Secrets +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-7782" for this suite. 07/29/23 15:30:08.846 +STEP: Destroying namespace "emptydir-582" for this suite. 08/24/23 11:39:29.606 ------------------------------ -• [SLOW TEST] [6.210 seconds] -[sig-node] Secrets -test/e2e/common/node/framework.go:23 - should be consumable via the environment [NodeConformance] [Conformance] - test/e2e/common/node/secrets.go:95 +• [SLOW TEST] [16.182 seconds] +[sig-storage] EmptyDir volumes +test/e2e/common/storage/framework.go:23 + should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:207 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Secrets + [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:30:02.645 - Jul 29 15:30:02.645: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 15:30:02.647 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:30:02.684 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:30:02.691 - [BeforeEach] [sig-node] Secrets + STEP: Creating a kubernetes client 08/24/23 11:39:13.435 + Aug 24 11:39:13.436: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 11:39:13.437 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:39:13.473 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:39:13.479 + [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable via the environment [NodeConformance] [Conformance] - test/e2e/common/node/secrets.go:95 - STEP: creating secret secrets-7782/secret-test-064b9847-0b20-48fd-ba3e-27df9a135c8f 07/29/23 15:30:02.7 - STEP: Creating a pod to test consume secrets 07/29/23 15:30:02.713 - Jul 29 15:30:02.731: INFO: Waiting up to 5m0s for pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1" in namespace "secrets-7782" to be "Succeeded or Failed" - Jul 29 15:30:02.742: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 11.571165ms - Jul 29 15:30:04.754: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023135579s - Jul 29 15:30:06.755: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1": Phase="Pending", Reason="", readiness=false. Elapsed: 4.023937519s - Jul 29 15:30:08.752: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.020826842s - STEP: Saw pod success 07/29/23 15:30:08.752 - Jul 29 15:30:08.752: INFO: Pod "pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1" satisfied condition "Succeeded or Failed" - Jul 29 15:30:08.758: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1 container env-test: - STEP: delete the pod 07/29/23 15:30:08.806 - Jul 29 15:30:08.833: INFO: Waiting for pod pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1 to disappear - Jul 29 15:30:08.839: INFO: Pod pod-configmaps-e2c0d644-9e0a-4ad2-932f-2f5c07118ec1 no longer exists - [AfterEach] [sig-node] Secrets + [It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:207 + STEP: Creating a pod to test emptydir 0666 on node default medium 08/24/23 11:39:13.485 + Aug 24 11:39:13.510: INFO: Waiting up to 5m0s for pod "pod-9d704f62-4d95-49c5-b380-04509ac78237" in namespace "emptydir-582" to be "Succeeded or Failed" + Aug 24 11:39:13.518: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 6.185248ms + Aug 24 11:39:15.533: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021717467s + Aug 24 11:39:17.526: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 4.014170556s + Aug 24 11:39:19.528: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 6.016760088s + Aug 24 11:39:21.523: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 8.011754703s + Aug 24 11:39:23.525: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 10.013286028s + Aug 24 11:39:25.527: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 12.015187536s + Aug 24 11:39:27.526: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Pending", Reason="", readiness=false. Elapsed: 14.014417955s + Aug 24 11:39:29.527: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 16.01509674s + STEP: Saw pod success 08/24/23 11:39:29.527 + Aug 24 11:39:29.528: INFO: Pod "pod-9d704f62-4d95-49c5-b380-04509ac78237" satisfied condition "Succeeded or Failed" + Aug 24 11:39:29.534: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-9d704f62-4d95-49c5-b380-04509ac78237 container test-container: + STEP: delete the pod 08/24/23 11:39:29.568 + Aug 24 11:39:29.590: INFO: Waiting for pod pod-9d704f62-4d95-49c5-b380-04509ac78237 to disappear + Aug 24 11:39:29.597: INFO: Pod pod-9d704f62-4d95-49c5-b380-04509ac78237 no longer exists + [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 15:30:08.840: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Secrets + Aug 24 11:39:29.598: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Secrets + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Secrets + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-7782" for this suite. 07/29/23 15:30:08.846 + STEP: Destroying namespace "emptydir-582" for this suite. 08/24/23 11:39:29.606 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSS ------------------------------ -[sig-node] InitContainer [NodeConformance] - should invoke init containers on a RestartNever pod [Conformance] - test/e2e/common/node/init_container.go:177 -[BeforeEach] [sig-node] InitContainer [NodeConformance] +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:99 +[BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:30:08.868 -Jul 29 15:30:08.868: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename init-container 07/29/23 15:30:08.87 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:30:08.898 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:30:08.902 -[BeforeEach] [sig-node] InitContainer [NodeConformance] +STEP: Creating a kubernetes client 08/24/23 11:39:29.619 +Aug 24 11:39:29.619: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 11:39:29.623 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:39:29.651 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:39:29.654 +[BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] InitContainer [NodeConformance] - test/e2e/common/node/init_container.go:165 -[It] should invoke init containers on a RestartNever pod [Conformance] - test/e2e/common/node/init_container.go:177 -STEP: creating the pod 07/29/23 15:30:08.905 -Jul 29 15:30:08.906: INFO: PodSpec: initContainers in spec.initContainers -[AfterEach] [sig-node] InitContainer [NodeConformance] +[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + 
test/e2e/common/storage/configmap_volume.go:99 +STEP: Creating configMap with name configmap-test-volume-map-27c0ae38-d4e5-4f42-bd9f-0ae4fc607487 08/24/23 11:39:29.657 +STEP: Creating a pod to test consume configMaps 08/24/23 11:39:29.665 +Aug 24 11:39:29.679: INFO: Waiting up to 5m0s for pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e" in namespace "configmap-8803" to be "Succeeded or Failed" +Aug 24 11:39:29.689: INFO: Pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e": Phase="Pending", Reason="", readiness=false. Elapsed: 9.846731ms +Aug 24 11:39:31.698: INFO: Pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018640343s +Aug 24 11:39:33.701: INFO: Pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022000598s +STEP: Saw pod success 08/24/23 11:39:33.701 +Aug 24 11:39:33.702: INFO: Pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e" satisfied condition "Succeeded or Failed" +Aug 24 11:39:33.708: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e container agnhost-container: +STEP: delete the pod 08/24/23 11:39:33.72 +Aug 24 11:39:33.744: INFO: Waiting for pod pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e to disappear +Aug 24 11:39:33.751: INFO: Pod pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e no longer exists +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 15:30:14.211: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +Aug 24 11:39:33.751: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "init-container-3612" for this suite. 07/29/23 15:30:14.226 +STEP: Destroying namespace "configmap-8803" for this suite. 
08/24/23 11:39:33.758 ------------------------------ -• [SLOW TEST] [5.373 seconds] -[sig-node] InitContainer [NodeConformance] -test/e2e/common/node/framework.go:23 - should invoke init containers on a RestartNever pod [Conformance] - test/e2e/common/node/init_container.go:177 +• [4.157 seconds] +[sig-storage] ConfigMap +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:99 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] InitContainer [NodeConformance] + [BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:30:08.868 - Jul 29 15:30:08.868: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename init-container 07/29/23 15:30:08.87 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:30:08.898 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:30:08.902 - [BeforeEach] [sig-node] InitContainer [NodeConformance] + STEP: Creating a kubernetes client 08/24/23 11:39:29.619 + Aug 24 11:39:29.619: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 11:39:29.623 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:39:29.651 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:39:29.654 + [BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] InitContainer [NodeConformance] - test/e2e/common/node/init_container.go:165 - [It] should invoke init containers on a RestartNever pod [Conformance] - test/e2e/common/node/init_container.go:177 - STEP: creating the pod 07/29/23 15:30:08.905 - Jul 29 15:30:08.906: INFO: PodSpec: initContainers in spec.initContainers - [AfterEach] [sig-node] InitContainer [NodeConformance] + [It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:99 + STEP: Creating configMap with name configmap-test-volume-map-27c0ae38-d4e5-4f42-bd9f-0ae4fc607487 08/24/23 11:39:29.657 + STEP: Creating a pod to test consume configMaps 08/24/23 11:39:29.665 + Aug 24 11:39:29.679: INFO: Waiting up to 5m0s for pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e" in namespace "configmap-8803" to be "Succeeded or Failed" + Aug 24 11:39:29.689: INFO: Pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e": Phase="Pending", Reason="", readiness=false. Elapsed: 9.846731ms + Aug 24 11:39:31.698: INFO: Pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018640343s + Aug 24 11:39:33.701: INFO: Pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.022000598s + STEP: Saw pod success 08/24/23 11:39:33.701 + Aug 24 11:39:33.702: INFO: Pod "pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e" satisfied condition "Succeeded or Failed" + Aug 24 11:39:33.708: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e container agnhost-container: + STEP: delete the pod 08/24/23 11:39:33.72 + Aug 24 11:39:33.744: INFO: Waiting for pod pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e to disappear + Aug 24 11:39:33.751: INFO: Pod pod-configmaps-0735dabf-60e7-4156-87cc-6b76bd7de40e no longer exists + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 15:30:14.211: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + Aug 24 11:39:33.751: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "init-container-3612" for this suite. 07/29/23 15:30:14.226 + STEP: Destroying namespace "configmap-8803" for this suite. 08/24/23 11:39:33.758 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should provide container's memory limit [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:207 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for multiple CRDs of same group but different versions [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:309 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:30:14.243 -Jul 29 15:30:14.243: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 15:30:14.245 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:30:14.277 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:30:14.282 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 11:39:33.78 +Aug 24 11:39:33.780: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 11:39:33.781 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:39:33.816 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:39:33.825 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should provide container's memory limit [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:207 -STEP: Creating a pod to test downward API volume plugin 07/29/23 15:30:14.287 -Jul 29 15:30:14.308: INFO: Waiting up to 5m0s for pod 
"downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989" in namespace "projected-1090" to be "Succeeded or Failed" -Jul 29 15:30:14.317: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 9.137347ms -Jul 29 15:30:16.325: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017455408s -Jul 29 15:30:18.327: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018739953s -Jul 29 15:30:20.324: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 6.016666216s -Jul 29 15:30:22.326: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 8.018258117s -Jul 29 15:30:24.325: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 10.017519362s -Jul 29 15:30:26.325: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 12.017095717s -Jul 29 15:30:28.336: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 14.028651649s -Jul 29 15:30:30.325: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Succeeded", Reason="", readiness=false. Elapsed: 16.016970113s -STEP: Saw pod success 07/29/23 15:30:30.325 -Jul 29 15:30:30.326: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989" satisfied condition "Succeeded or Failed" -Jul 29 15:30:30.331: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989 container client-container: -STEP: delete the pod 07/29/23 15:30:30.342 -Jul 29 15:30:30.366: INFO: Waiting for pod downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989 to disappear -Jul 29 15:30:30.370: INFO: Pod downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +[It] works for multiple CRDs of same group but different versions [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:309 +STEP: CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation 08/24/23 11:39:33.835 +Aug 24 11:39:33.837: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation 08/24/23 11:39:43.357 +Aug 24 11:39:43.358: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:39:46.062: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 15:30:30.371: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 11:39:56.551: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] 
[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "projected-1090" for this suite. 07/29/23 15:30:30.38 +STEP: Destroying namespace "crd-publish-openapi-9510" for this suite. 08/24/23 11:39:56.573 ------------------------------ -• [SLOW TEST] [16.150 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should provide container's memory limit [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:207 +• [SLOW TEST] [22.804 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + works for multiple CRDs of same group but different versions [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:309 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:30:14.243 - Jul 29 15:30:14.243: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 15:30:14.245 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:30:14.277 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:30:14.282 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 11:39:33.78 + Aug 24 11:39:33.780: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 11:39:33.781 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:39:33.816 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:39:33.825 + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should provide container's memory limit [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:207 - STEP: Creating a pod to test downward API volume plugin 07/29/23 15:30:14.287 - Jul 29 15:30:14.308: INFO: Waiting up to 5m0s for pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989" in namespace "projected-1090" to be "Succeeded or Failed" - Jul 29 15:30:14.317: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 9.137347ms - Jul 29 15:30:16.325: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017455408s - Jul 29 15:30:18.327: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018739953s - Jul 29 15:30:20.324: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 6.016666216s - Jul 29 15:30:22.326: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 8.018258117s - Jul 29 15:30:24.325: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. 
Elapsed: 10.017519362s - Jul 29 15:30:26.325: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 12.017095717s - Jul 29 15:30:28.336: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Pending", Reason="", readiness=false. Elapsed: 14.028651649s - Jul 29 15:30:30.325: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989": Phase="Succeeded", Reason="", readiness=false. Elapsed: 16.016970113s - STEP: Saw pod success 07/29/23 15:30:30.325 - Jul 29 15:30:30.326: INFO: Pod "downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989" satisfied condition "Succeeded or Failed" - Jul 29 15:30:30.331: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989 container client-container: - STEP: delete the pod 07/29/23 15:30:30.342 - Jul 29 15:30:30.366: INFO: Waiting for pod downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989 to disappear - Jul 29 15:30:30.370: INFO: Pod downwardapi-volume-cb3afb3e-0ead-49ac-afa5-e6891bdd7989 no longer exists - [AfterEach] [sig-storage] Projected downwardAPI + [It] works for multiple CRDs of same group but different versions [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:309 + STEP: CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation 08/24/23 11:39:33.835 + Aug 24 11:39:33.837: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation 08/24/23 11:39:43.357 + Aug 24 11:39:43.358: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:39:46.062: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 15:30:30.371: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 11:39:56.551: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "projected-1090" for this suite. 07/29/23 15:30:30.38 + STEP: Destroying namespace "crd-publish-openapi-9510" for this suite. 
08/24/23 11:39:56.573 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSS +SSSSSSSSS ------------------------------ -[sig-network] EndpointSlice - should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] - test/e2e/network/endpointslice.go:205 -[BeforeEach] [sig-network] EndpointSlice +[sig-storage] ConfigMap + optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:240 +[BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:30:30.397 -Jul 29 15:30:30.397: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename endpointslice 07/29/23 15:30:30.4 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:30:30.428 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:30:30.433 -[BeforeEach] [sig-network] EndpointSlice +STEP: Creating a kubernetes client 08/24/23 11:39:56.585 +Aug 24 11:39:56.585: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 11:39:56.587 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:39:56.63 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:39:56.635 +[BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] EndpointSlice - test/e2e/network/endpointslice.go:52 -[It] should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] - test/e2e/network/endpointslice.go:205 -STEP: referencing a single matching pod 07/29/23 15:30:35.635 -STEP: referencing matching pods with named port 07/29/23 15:30:40.655 -STEP: creating empty Endpoints and EndpointSlices for no matching Pods 07/29/23 15:30:45.68 -STEP: recreating EndpointSlices after they've been deleted 07/29/23 15:30:50.699 -Jul 29 15:30:50.748: INFO: EndpointSlice for Service endpointslice-9928/example-named-port not found -[AfterEach] [sig-network] EndpointSlice +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:240 +STEP: Creating configMap with name cm-test-opt-del-512a52d9-1e8f-4493-a78a-c9174abb4025 08/24/23 11:39:56.653 +STEP: Creating configMap with name cm-test-opt-upd-25bb1507-5430-47ff-a71c-7ff77df1b903 08/24/23 11:39:56.665 +STEP: Creating the pod 08/24/23 11:39:56.673 +Aug 24 11:39:56.689: INFO: Waiting up to 5m0s for pod "pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc" in namespace "configmap-141" to be "running and ready" +Aug 24 11:39:56.698: INFO: Pod "pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc": Phase="Pending", Reason="", readiness=false. Elapsed: 8.315763ms +Aug 24 11:39:56.698: INFO: The phase of Pod pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:39:58.708: INFO: Pod "pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.0191577s +Aug 24 11:39:58.709: INFO: The phase of Pod pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc is Running (Ready = true) +Aug 24 11:39:58.709: INFO: Pod "pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc" satisfied condition "running and ready" +STEP: Deleting configmap cm-test-opt-del-512a52d9-1e8f-4493-a78a-c9174abb4025 08/24/23 11:39:58.771 +STEP: Updating configmap cm-test-opt-upd-25bb1507-5430-47ff-a71c-7ff77df1b903 08/24/23 11:39:58.785 +STEP: Creating configMap with name cm-test-opt-create-92d519e1-9723-49b0-9fad-a7486fda7de6 08/24/23 11:39:58.798 +STEP: waiting to observe update in volume 08/24/23 11:39:58.807 +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 15:31:00.768: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] EndpointSlice +Aug 24 11:40:00.885: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] EndpointSlice +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] EndpointSlice +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "endpointslice-9928" for this suite. 07/29/23 15:31:00.778 +STEP: Destroying namespace "configmap-141" for this suite. 08/24/23 11:40:00.897 ------------------------------ -• [SLOW TEST] [30.395 seconds] -[sig-network] EndpointSlice -test/e2e/network/common/framework.go:23 - should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] - test/e2e/network/endpointslice.go:205 +• [4.330 seconds] +[sig-storage] ConfigMap +test/e2e/common/storage/framework.go:23 + optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:240 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] EndpointSlice + [BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:30:30.397 - Jul 29 15:30:30.397: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename endpointslice 07/29/23 15:30:30.4 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:30:30.428 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:30:30.433 - [BeforeEach] [sig-network] EndpointSlice + STEP: Creating a kubernetes client 08/24/23 11:39:56.585 + Aug 24 11:39:56.585: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 11:39:56.587 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:39:56.63 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:39:56.635 + [BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] EndpointSlice - test/e2e/network/endpointslice.go:52 - [It] should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] - test/e2e/network/endpointslice.go:205 - STEP: referencing a single matching pod 07/29/23 15:30:35.635 - STEP: referencing matching pods with named port 07/29/23 15:30:40.655 - STEP: creating empty Endpoints and EndpointSlices for no matching Pods 07/29/23 15:30:45.68 - STEP: recreating EndpointSlices after they've been deleted 07/29/23 
15:30:50.699 - Jul 29 15:30:50.748: INFO: EndpointSlice for Service endpointslice-9928/example-named-port not found - [AfterEach] [sig-network] EndpointSlice + [It] optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:240 + STEP: Creating configMap with name cm-test-opt-del-512a52d9-1e8f-4493-a78a-c9174abb4025 08/24/23 11:39:56.653 + STEP: Creating configMap with name cm-test-opt-upd-25bb1507-5430-47ff-a71c-7ff77df1b903 08/24/23 11:39:56.665 + STEP: Creating the pod 08/24/23 11:39:56.673 + Aug 24 11:39:56.689: INFO: Waiting up to 5m0s for pod "pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc" in namespace "configmap-141" to be "running and ready" + Aug 24 11:39:56.698: INFO: Pod "pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc": Phase="Pending", Reason="", readiness=false. Elapsed: 8.315763ms + Aug 24 11:39:56.698: INFO: The phase of Pod pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:39:58.708: INFO: Pod "pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc": Phase="Running", Reason="", readiness=true. Elapsed: 2.0191577s + Aug 24 11:39:58.709: INFO: The phase of Pod pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc is Running (Ready = true) + Aug 24 11:39:58.709: INFO: Pod "pod-configmaps-c36d4986-7b91-46e5-866d-47e627c07fcc" satisfied condition "running and ready" + STEP: Deleting configmap cm-test-opt-del-512a52d9-1e8f-4493-a78a-c9174abb4025 08/24/23 11:39:58.771 + STEP: Updating configmap cm-test-opt-upd-25bb1507-5430-47ff-a71c-7ff77df1b903 08/24/23 11:39:58.785 + STEP: Creating configMap with name cm-test-opt-create-92d519e1-9723-49b0-9fad-a7486fda7de6 08/24/23 11:39:58.798 + STEP: waiting to observe update in volume 08/24/23 11:39:58.807 + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 15:31:00.768: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] EndpointSlice + Aug 24 11:40:00.885: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] EndpointSlice + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] EndpointSlice + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "endpointslice-9928" for this suite. 07/29/23 15:31:00.778 + STEP: Destroying namespace "configmap-141" for this suite. 
08/24/23 11:40:00.897 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSS ------------------------------ -[sig-api-machinery] Namespaces [Serial] - should apply a finalizer to a Namespace [Conformance] - test/e2e/apimachinery/namespace.go:394 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +[sig-api-machinery] Watchers + should observe add, update, and delete watch notifications on configmaps [Conformance] + test/e2e/apimachinery/watch.go:60 +[BeforeEach] [sig-api-machinery] Watchers set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:31:00.803 -Jul 29 15:31:00.803: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename namespaces 07/29/23 15:31:00.806 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:00.84 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:00.848 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +STEP: Creating a kubernetes client 08/24/23 11:40:00.925 +Aug 24 11:40:00.926: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename watch 08/24/23 11:40:00.93 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:00.966 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:00.972 +[BeforeEach] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:31 -[It] should apply a finalizer to a Namespace [Conformance] - test/e2e/apimachinery/namespace.go:394 -STEP: Creating namespace "e2e-ns-lsdm5" 07/29/23 15:31:00.855 -Jul 29 15:31:00.881: INFO: Namespace "e2e-ns-lsdm5-8463" has []v1.FinalizerName{"kubernetes"} -STEP: Adding e2e finalizer to namespace "e2e-ns-lsdm5-8463" 07/29/23 15:31:00.882 -Jul 29 15:31:00.898: INFO: Namespace "e2e-ns-lsdm5-8463" has []v1.FinalizerName{"kubernetes", "e2e.example.com/fakeFinalizer"} -STEP: Removing e2e finalizer from namespace "e2e-ns-lsdm5-8463" 07/29/23 15:31:00.898 -Jul 29 15:31:00.915: INFO: Namespace "e2e-ns-lsdm5-8463" has []v1.FinalizerName{"kubernetes"} -[AfterEach] [sig-api-machinery] Namespaces [Serial] +[It] should observe add, update, and delete watch notifications on configmaps [Conformance] + test/e2e/apimachinery/watch.go:60 +STEP: creating a watch on configmaps with label A 08/24/23 11:40:00.978 +STEP: creating a watch on configmaps with label B 08/24/23 11:40:00.98 +STEP: creating a watch on configmaps with label A or B 08/24/23 11:40:00.983 +STEP: creating a configmap with label A and ensuring the correct watchers observe the notification 08/24/23 11:40:00.985 +Aug 24 11:40:00.997: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3696 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 11:40:00.998: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3696 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} 
}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying configmap A and ensuring the correct watchers observe the notification 08/24/23 11:40:00.998 +Aug 24 11:40:01.019: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3697 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 11:40:01.020: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3697 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying configmap A again and ensuring the correct watchers observe the notification 08/24/23 11:40:01.02 +Aug 24 11:40:01.042: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3698 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 11:40:01.042: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3698 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: deleting configmap A and ensuring the correct watchers observe the notification 08/24/23 11:40:01.043 +Aug 24 11:40:01.062: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3699 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 11:40:01.062: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3699 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: creating a configmap with label B and ensuring the correct watchers observe the notification 08/24/23 11:40:01.062 +Aug 24 11:40:01.074: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-4634 
43048ee0-21f0-4410-a51e-c19b47370d8c 3700 0 2023-08-24 11:40:01 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:01 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 11:40:01.075: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-4634 43048ee0-21f0-4410-a51e-c19b47370d8c 3700 0 2023-08-24 11:40:01 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:01 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: deleting configmap B and ensuring the correct watchers observe the notification 08/24/23 11:40:11.076 +Aug 24 11:40:11.119: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-4634 43048ee0-21f0-4410-a51e-c19b47370d8c 3747 0 2023-08-24 11:40:01 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:01 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 11:40:11.119: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-4634 43048ee0-21f0-4410-a51e-c19b47370d8c 3747 0 2023-08-24 11:40:01 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:01 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers test/e2e/framework/node/init/init.go:32 -Jul 29 15:31:00.916: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] +Aug 24 11:40:21.123: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] +[DeferCleanup (Each)] [sig-api-machinery] Watchers dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] +[DeferCleanup (Each)] [sig-api-machinery] Watchers tear down framework | framework.go:193 -STEP: Destroying namespace "namespaces-8367" for this suite. 07/29/23 15:31:00.923 -STEP: Destroying namespace "e2e-ns-lsdm5-8463" for this suite. 07/29/23 15:31:00.933 +STEP: Destroying namespace "watch-4634" for this suite. 
08/24/23 11:40:21.135 ------------------------------ -• [0.143 seconds] -[sig-api-machinery] Namespaces [Serial] +• [SLOW TEST] [20.221 seconds] +[sig-api-machinery] Watchers test/e2e/apimachinery/framework.go:23 - should apply a finalizer to a Namespace [Conformance] - test/e2e/apimachinery/namespace.go:394 + should observe add, update, and delete watch notifications on configmaps [Conformance] + test/e2e/apimachinery/watch.go:60 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Namespaces [Serial] + [BeforeEach] [sig-api-machinery] Watchers set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:31:00.803 - Jul 29 15:31:00.803: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename namespaces 07/29/23 15:31:00.806 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:00.84 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:00.848 - [BeforeEach] [sig-api-machinery] Namespaces [Serial] + STEP: Creating a kubernetes client 08/24/23 11:40:00.925 + Aug 24 11:40:00.926: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename watch 08/24/23 11:40:00.93 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:00.966 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:00.972 + [BeforeEach] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:31 - [It] should apply a finalizer to a Namespace [Conformance] - test/e2e/apimachinery/namespace.go:394 - STEP: Creating namespace "e2e-ns-lsdm5" 07/29/23 15:31:00.855 - Jul 29 15:31:00.881: INFO: Namespace "e2e-ns-lsdm5-8463" has []v1.FinalizerName{"kubernetes"} - STEP: Adding e2e finalizer to namespace "e2e-ns-lsdm5-8463" 07/29/23 15:31:00.882 - Jul 29 15:31:00.898: INFO: Namespace "e2e-ns-lsdm5-8463" has []v1.FinalizerName{"kubernetes", "e2e.example.com/fakeFinalizer"} - STEP: Removing e2e finalizer from namespace "e2e-ns-lsdm5-8463" 07/29/23 15:31:00.898 - Jul 29 15:31:00.915: INFO: Namespace "e2e-ns-lsdm5-8463" has []v1.FinalizerName{"kubernetes"} - [AfterEach] [sig-api-machinery] Namespaces [Serial] + [It] should observe add, update, and delete watch notifications on configmaps [Conformance] + test/e2e/apimachinery/watch.go:60 + STEP: creating a watch on configmaps with label A 08/24/23 11:40:00.978 + STEP: creating a watch on configmaps with label B 08/24/23 11:40:00.98 + STEP: creating a watch on configmaps with label A or B 08/24/23 11:40:00.983 + STEP: creating a configmap with label A and ensuring the correct watchers observe the notification 08/24/23 11:40:00.985 + Aug 24 11:40:00.997: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3696 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 11:40:00.998: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3696 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} 
}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} + STEP: modifying configmap A and ensuring the correct watchers observe the notification 08/24/23 11:40:00.998 + Aug 24 11:40:01.019: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3697 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 11:40:01.020: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3697 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} + STEP: modifying configmap A again and ensuring the correct watchers observe the notification 08/24/23 11:40:01.02 + Aug 24 11:40:01.042: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3698 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 11:40:01.042: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3698 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} + STEP: deleting configmap A and ensuring the correct watchers observe the notification 08/24/23 11:40:01.043 + Aug 24 11:40:01.062: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3699 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 11:40:01.062: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-4634 8c60aa99-b907-465d-995d-4acb603372e5 3699 0 2023-08-24 11:40:00 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:00 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} + STEP: creating a configmap with label B and ensuring the correct watchers observe the notification 08/24/23 11:40:01.062 + Aug 24 11:40:01.074: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-4634 
43048ee0-21f0-4410-a51e-c19b47370d8c 3700 0 2023-08-24 11:40:01 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:01 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 11:40:01.075: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-4634 43048ee0-21f0-4410-a51e-c19b47370d8c 3700 0 2023-08-24 11:40:01 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:01 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} + STEP: deleting configmap B and ensuring the correct watchers observe the notification 08/24/23 11:40:11.076 + Aug 24 11:40:11.119: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-4634 43048ee0-21f0-4410-a51e-c19b47370d8c 3747 0 2023-08-24 11:40:01 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:01 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 11:40:11.119: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-4634 43048ee0-21f0-4410-a51e-c19b47370d8c 3747 0 2023-08-24 11:40:01 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-08-24 11:40:01 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} + [AfterEach] [sig-api-machinery] Watchers test/e2e/framework/node/init/init.go:32 - Jul 29 15:31:00.916: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + Aug 24 11:40:21.123: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + [DeferCleanup (Each)] [sig-api-machinery] Watchers dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + [DeferCleanup (Each)] [sig-api-machinery] Watchers tear down framework | framework.go:193 - STEP: Destroying namespace "namespaces-8367" for this suite. 07/29/23 15:31:00.923 - STEP: Destroying namespace "e2e-ns-lsdm5-8463" for this suite. 07/29/23 15:31:00.933 + STEP: Destroying namespace "watch-4634" for this suite. 
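[editor's sketch] The Watchers record above exercises the core watch pattern: open watches on ConfigMaps filtered by label, then assert that each create/update/delete surfaces as an ADDED/MODIFIED/DELETED notification on the right watchers. A minimal client-go sketch of that pattern, assuming only a reachable cluster and a kubeconfig path in the KUBECONFIG environment variable (the namespace and label values are illustrative, not the suite's own):

// Minimal sketch of the watch pattern exercised by the test above; not the
// conformance suite's own code. Assumes KUBECONFIG points at a valid kubeconfig.
package main

import (
	"context"
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Watch only ConfigMaps carrying the test-style "label A" selector.
	w, err := client.CoreV1().ConfigMaps("default").Watch(context.TODO(),
		metav1.ListOptions{LabelSelector: "watch-this-configmap=multiple-watchers-A"})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	// Each create/update/delete of a matching ConfigMap arrives here as an
	// ADDED/MODIFIED/DELETED event, which is what the "Got : ..." log lines record.
	for event := range w.ResultChan() {
		fmt.Printf("Got : %s %T\n", event.Type, event.Object)
	}
}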
08/24/23 11:40:21.135 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for CRD with validation schema [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:69 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:31:00.954 -Jul 29 15:31:00.955: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 15:31:00.959 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:00.986 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:00.99 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:31 -[It] works for CRD with validation schema [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:69 -Jul 29 15:31:00.997: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: kubectl validation (kubectl create and apply) allows request with known and required properties 07/29/23 15:31:03.997 -Jul 29 15:31:03.998: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 create -f -' -Jul 29 15:31:05.609: INFO: stderr: "" -Jul 29 15:31:05.609: INFO: stdout: "e2e-test-crd-publish-openapi-1403-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" -Jul 29 15:31:05.609: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 delete e2e-test-crd-publish-openapi-1403-crds test-foo' -Jul 29 15:31:05.767: INFO: stderr: "" -Jul 29 15:31:05.768: INFO: stdout: "e2e-test-crd-publish-openapi-1403-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" -Jul 29 15:31:05.768: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 apply -f -' -Jul 29 15:31:07.756: INFO: stderr: "" -Jul 29 15:31:07.757: INFO: stdout: "e2e-test-crd-publish-openapi-1403-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" -Jul 29 15:31:07.757: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 delete e2e-test-crd-publish-openapi-1403-crds test-foo' -Jul 29 15:31:07.936: INFO: stderr: "" -Jul 29 15:31:07.936: INFO: stdout: "e2e-test-crd-publish-openapi-1403-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" -STEP: kubectl validation (kubectl create and apply) rejects request with value outside defined enum values 07/29/23 15:31:07.936 -Jul 29 15:31:07.937: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 create -f -' -Jul 29 15:31:08.418: INFO: rc: 1 -STEP: kubectl validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema 07/29/23 15:31:08.418 -Jul 29 15:31:08.419: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 create -f -' 
-Jul 29 15:31:08.846: INFO: rc: 1 -Jul 29 15:31:08.846: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 apply -f -' -Jul 29 15:31:09.286: INFO: rc: 1 -STEP: kubectl validation (kubectl create and apply) rejects request without required properties 07/29/23 15:31:09.286 -Jul 29 15:31:09.288: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 create -f -' -Jul 29 15:31:09.692: INFO: rc: 1 -Jul 29 15:31:09.692: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 apply -f -' -Jul 29 15:31:10.089: INFO: rc: 1 -STEP: kubectl explain works to explain CR properties 07/29/23 15:31:10.089 -Jul 29 15:31:10.090: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds' -Jul 29 15:31:10.479: INFO: stderr: "" -Jul 29 15:31:10.479: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1403-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nDESCRIPTION:\n Foo CRD for Testing\n\nFIELDS:\n apiVersion\t\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t\n Specification of Foo\n\n status\t\n Status of Foo\n\n" -STEP: kubectl explain works to explain CR properties recursively 07/29/23 15:31:10.48 -Jul 29 15:31:10.481: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds.metadata' -Jul 29 15:31:10.914: INFO: stderr: "" -Jul 29 15:31:10.914: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1403-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: metadata \n\nDESCRIPTION:\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n ObjectMeta is metadata that all persisted resources must have, which\n includes all objects users must create.\n\nFIELDS:\n annotations\t\n Annotations is an unstructured key value map stored with a resource that\n may be set by external tools to store and retrieve arbitrary metadata. They\n are not queryable and should be preserved when modifying objects. More\n info: http://kubernetes.io/docs/user-guide/annotations\n\n creationTimestamp\t\n CreationTimestamp is a timestamp representing the server time when this\n object was created. It is not guaranteed to be set in happens-before order\n across separate operations. Clients may not set this value. It is\n represented in RFC3339 form and is in UTC.\n\n Populated by the system. Read-only. Null for lists. 
More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n deletionGracePeriodSeconds\t\n Number of seconds allowed for this object to gracefully terminate before it\n will be removed from the system. Only set when deletionTimestamp is also\n set. May only be shortened. Read-only.\n\n deletionTimestamp\t\n DeletionTimestamp is RFC 3339 date and time at which this resource will be\n deleted. This field is set by the server when a graceful deletion is\n requested by the user, and is not directly settable by a client. The\n resource is expected to be deleted (no longer visible from resource lists,\n and not reachable by name) after the time in this field, once the\n finalizers list is empty. As long as the finalizers list contains items,\n deletion is blocked. Once the deletionTimestamp is set, this value may not\n be unset or be set further into the future, although it may be shortened or\n the resource may be deleted prior to this time. For example, a user may\n request that a pod is deleted in 30 seconds. The Kubelet will react by\n sending a graceful termination signal to the containers in the pod. After\n that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL)\n to the container and after cleanup, remove the pod from the API. In the\n presence of network partitions, this object may still exist after this\n timestamp, until an administrator or automated process can determine the\n resource is fully terminated. If not set, graceful deletion of the object\n has not been requested.\n\n Populated by the system when a graceful deletion is requested. Read-only.\n More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n finalizers\t<[]string>\n Must be empty before the object is deleted from the registry. Each entry is\n an identifier for the responsible component that will remove the entry from\n the list. If the deletionTimestamp of the object is non-nil, entries in\n this list can only be removed. Finalizers may be processed and removed in\n any order. Order is NOT enforced because it introduces significant risk of\n stuck finalizers. finalizers is a shared field, any actor with permission\n can reorder it. If the finalizer list is processed in order, then this can\n lead to a situation in which the component responsible for the first\n finalizer in the list is waiting for a signal (field value, external\n system, or other) produced by a component responsible for a finalizer later\n in the list, resulting in a deadlock. Without enforced ordering finalizers\n are free to order amongst themselves and are not vulnerable to ordering\n changes in the list.\n\n generateName\t\n GenerateName is an optional prefix, used by the server, to generate a\n unique name ONLY IF the Name field has not been provided. If this field is\n used, the name returned to the client will be different than the name\n passed. This value will also be combined with a unique suffix. The provided\n value has the same validation rules as the Name field, and may be truncated\n by the length of the suffix required to make the value unique on the\n server.\n\n If this field is specified and the generated name exists, the server will\n return a 409.\n\n Applied only if Name is not specified. 
More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n generation\t\n A sequence number representing a specific generation of the desired state.\n Populated by the system. Read-only.\n\n labels\t\n Map of string keys and values that can be used to organize and categorize\n (scope and select) objects. May match selectors of replication controllers\n and services. More info: http://kubernetes.io/docs/user-guide/labels\n\n managedFields\t<[]Object>\n ManagedFields maps workflow-id and version to the set of fields that are\n managed by that workflow. This is mostly for internal housekeeping, and\n users typically shouldn't need to set or understand this field. A workflow\n can be the user's name, a controller's name, or the name of a specific\n apply path like \"ci-cd\". The set of fields is always in the version that\n the workflow used when modifying the object.\n\n name\t\n Name must be unique within a namespace. Is required when creating\n resources, although some resources may allow a client to request the\n generation of an appropriate name automatically. Name is primarily intended\n for creation idempotence and configuration definition. Cannot be updated.\n More info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n namespace\t\n Namespace defines the space within which each name must be unique. An empty\n namespace is equivalent to the \"default\" namespace, but \"default\" is the\n canonical representation. Not all objects are required to be scoped to a\n namespace - the value of this field for those objects will be empty.\n\n Must be a DNS_LABEL. Cannot be updated. More info:\n http://kubernetes.io/docs/user-guide/namespaces\n\n ownerReferences\t<[]Object>\n List of objects depended by this object. If ALL objects in the list have\n been deleted, this object will be garbage collected. If this object is\n managed by a controller, then an entry in this list will point to this\n controller, with the controller field set to true. There cannot be more\n than one managing controller.\n\n resourceVersion\t\n An opaque value that represents the internal version of this object that\n can be used by clients to determine when objects have changed. May be used\n for optimistic concurrency, change detection, and the watch operation on a\n resource or set of resources. Clients must treat these values as opaque and\n passed unmodified back to the server. They may only be valid for a\n particular resource or set of resources.\n\n Populated by the system. Read-only. Value must be treated as opaque by\n clients and . More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n\n selfLink\t\n Deprecated: selfLink is a legacy read-only field that is no longer\n populated by the system.\n\n uid\t\n UID is the unique in time and space value for this object. It is typically\n generated by the server on successful creation of a resource and is not\n allowed to change on PUT operations.\n\n Populated by the system. Read-only. 
More info:\n http://kubernetes.io/docs/user-guide/identifiers#uids\n\n" -Jul 29 15:31:10.915: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds.spec' -Jul 29 15:31:11.388: INFO: stderr: "" -Jul 29 15:31:11.388: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1403-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: spec \n\nDESCRIPTION:\n Specification of Foo\n\nFIELDS:\n bars\t<[]Object>\n List of Bars and their specs.\n\n" -Jul 29 15:31:11.389: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds.spec.bars' -Jul 29 15:31:11.796: INFO: stderr: "" -Jul 29 15:31:11.796: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1403-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: bars <[]Object>\n\nDESCRIPTION:\n List of Bars and their specs.\n\nFIELDS:\n age\t\n Age of Bar.\n\n bazs\t<[]string>\n List of Bazs.\n\n feeling\t\n Whether Bar is feeling great.\n\n name\t -required-\n Name of Bar.\n\n" -STEP: kubectl explain works to return error when explain is called on property that doesn't exist 07/29/23 15:31:11.796 -Jul 29 15:31:11.796: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds.spec.bars2' -Jul 29 15:31:12.184: INFO: rc: 1 -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:32 -Jul 29 15:31:14.647: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - tear down framework | framework.go:193 -STEP: Destroying namespace "crd-publish-openapi-3062" for this suite. 
07/29/23 15:31:14.663 ------------------------------- -• [SLOW TEST] [13.718 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - works for CRD with validation schema [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:69 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:31:00.954 - Jul 29 15:31:00.955: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 15:31:00.959 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:00.986 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:00.99 - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:31 - [It] works for CRD with validation schema [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:69 - Jul 29 15:31:00.997: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: kubectl validation (kubectl create and apply) allows request with known and required properties 07/29/23 15:31:03.997 - Jul 29 15:31:03.998: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 create -f -' - Jul 29 15:31:05.609: INFO: stderr: "" - Jul 29 15:31:05.609: INFO: stdout: "e2e-test-crd-publish-openapi-1403-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" - Jul 29 15:31:05.609: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 delete e2e-test-crd-publish-openapi-1403-crds test-foo' - Jul 29 15:31:05.767: INFO: stderr: "" - Jul 29 15:31:05.768: INFO: stdout: "e2e-test-crd-publish-openapi-1403-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" - Jul 29 15:31:05.768: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 apply -f -' - Jul 29 15:31:07.756: INFO: stderr: "" - Jul 29 15:31:07.757: INFO: stdout: "e2e-test-crd-publish-openapi-1403-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" - Jul 29 15:31:07.757: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 delete e2e-test-crd-publish-openapi-1403-crds test-foo' - Jul 29 15:31:07.936: INFO: stderr: "" - Jul 29 15:31:07.936: INFO: stdout: "e2e-test-crd-publish-openapi-1403-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" - STEP: kubectl validation (kubectl create and apply) rejects request with value outside defined enum values 07/29/23 15:31:07.936 - Jul 29 15:31:07.937: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 create -f -' - Jul 29 15:31:08.418: INFO: rc: 1 - STEP: kubectl validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema 07/29/23 15:31:08.418 - Jul 29 15:31:08.419: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 
create -f -' - Jul 29 15:31:08.846: INFO: rc: 1 - Jul 29 15:31:08.846: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 apply -f -' - Jul 29 15:31:09.286: INFO: rc: 1 - STEP: kubectl validation (kubectl create and apply) rejects request without required properties 07/29/23 15:31:09.286 - Jul 29 15:31:09.288: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 create -f -' - Jul 29 15:31:09.692: INFO: rc: 1 - Jul 29 15:31:09.692: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 --namespace=crd-publish-openapi-3062 apply -f -' - Jul 29 15:31:10.089: INFO: rc: 1 - STEP: kubectl explain works to explain CR properties 07/29/23 15:31:10.089 - Jul 29 15:31:10.090: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds' - Jul 29 15:31:10.479: INFO: stderr: "" - Jul 29 15:31:10.479: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1403-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nDESCRIPTION:\n Foo CRD for Testing\n\nFIELDS:\n apiVersion\t\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t\n Specification of Foo\n\n status\t\n Status of Foo\n\n" - STEP: kubectl explain works to explain CR properties recursively 07/29/23 15:31:10.48 - Jul 29 15:31:10.481: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds.metadata' - Jul 29 15:31:10.914: INFO: stderr: "" - Jul 29 15:31:10.914: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1403-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: metadata \n\nDESCRIPTION:\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n ObjectMeta is metadata that all persisted resources must have, which\n includes all objects users must create.\n\nFIELDS:\n annotations\t\n Annotations is an unstructured key value map stored with a resource that\n may be set by external tools to store and retrieve arbitrary metadata. They\n are not queryable and should be preserved when modifying objects. More\n info: http://kubernetes.io/docs/user-guide/annotations\n\n creationTimestamp\t\n CreationTimestamp is a timestamp representing the server time when this\n object was created. It is not guaranteed to be set in happens-before order\n across separate operations. Clients may not set this value. It is\n represented in RFC3339 form and is in UTC.\n\n Populated by the system. Read-only. 
Null for lists. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n deletionGracePeriodSeconds\t\n Number of seconds allowed for this object to gracefully terminate before it\n will be removed from the system. Only set when deletionTimestamp is also\n set. May only be shortened. Read-only.\n\n deletionTimestamp\t\n DeletionTimestamp is RFC 3339 date and time at which this resource will be\n deleted. This field is set by the server when a graceful deletion is\n requested by the user, and is not directly settable by a client. The\n resource is expected to be deleted (no longer visible from resource lists,\n and not reachable by name) after the time in this field, once the\n finalizers list is empty. As long as the finalizers list contains items,\n deletion is blocked. Once the deletionTimestamp is set, this value may not\n be unset or be set further into the future, although it may be shortened or\n the resource may be deleted prior to this time. For example, a user may\n request that a pod is deleted in 30 seconds. The Kubelet will react by\n sending a graceful termination signal to the containers in the pod. After\n that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL)\n to the container and after cleanup, remove the pod from the API. In the\n presence of network partitions, this object may still exist after this\n timestamp, until an administrator or automated process can determine the\n resource is fully terminated. If not set, graceful deletion of the object\n has not been requested.\n\n Populated by the system when a graceful deletion is requested. Read-only.\n More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n finalizers\t<[]string>\n Must be empty before the object is deleted from the registry. Each entry is\n an identifier for the responsible component that will remove the entry from\n the list. If the deletionTimestamp of the object is non-nil, entries in\n this list can only be removed. Finalizers may be processed and removed in\n any order. Order is NOT enforced because it introduces significant risk of\n stuck finalizers. finalizers is a shared field, any actor with permission\n can reorder it. If the finalizer list is processed in order, then this can\n lead to a situation in which the component responsible for the first\n finalizer in the list is waiting for a signal (field value, external\n system, or other) produced by a component responsible for a finalizer later\n in the list, resulting in a deadlock. Without enforced ordering finalizers\n are free to order amongst themselves and are not vulnerable to ordering\n changes in the list.\n\n generateName\t\n GenerateName is an optional prefix, used by the server, to generate a\n unique name ONLY IF the Name field has not been provided. If this field is\n used, the name returned to the client will be different than the name\n passed. This value will also be combined with a unique suffix. The provided\n value has the same validation rules as the Name field, and may be truncated\n by the length of the suffix required to make the value unique on the\n server.\n\n If this field is specified and the generated name exists, the server will\n return a 409.\n\n Applied only if Name is not specified. 
More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n generation\t\n A sequence number representing a specific generation of the desired state.\n Populated by the system. Read-only.\n\n labels\t\n Map of string keys and values that can be used to organize and categorize\n (scope and select) objects. May match selectors of replication controllers\n and services. More info: http://kubernetes.io/docs/user-guide/labels\n\n managedFields\t<[]Object>\n ManagedFields maps workflow-id and version to the set of fields that are\n managed by that workflow. This is mostly for internal housekeeping, and\n users typically shouldn't need to set or understand this field. A workflow\n can be the user's name, a controller's name, or the name of a specific\n apply path like \"ci-cd\". The set of fields is always in the version that\n the workflow used when modifying the object.\n\n name\t\n Name must be unique within a namespace. Is required when creating\n resources, although some resources may allow a client to request the\n generation of an appropriate name automatically. Name is primarily intended\n for creation idempotence and configuration definition. Cannot be updated.\n More info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n namespace\t\n Namespace defines the space within which each name must be unique. An empty\n namespace is equivalent to the \"default\" namespace, but \"default\" is the\n canonical representation. Not all objects are required to be scoped to a\n namespace - the value of this field for those objects will be empty.\n\n Must be a DNS_LABEL. Cannot be updated. More info:\n http://kubernetes.io/docs/user-guide/namespaces\n\n ownerReferences\t<[]Object>\n List of objects depended by this object. If ALL objects in the list have\n been deleted, this object will be garbage collected. If this object is\n managed by a controller, then an entry in this list will point to this\n controller, with the controller field set to true. There cannot be more\n than one managing controller.\n\n resourceVersion\t\n An opaque value that represents the internal version of this object that\n can be used by clients to determine when objects have changed. May be used\n for optimistic concurrency, change detection, and the watch operation on a\n resource or set of resources. Clients must treat these values as opaque and\n passed unmodified back to the server. They may only be valid for a\n particular resource or set of resources.\n\n Populated by the system. Read-only. Value must be treated as opaque by\n clients and . More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n\n selfLink\t\n Deprecated: selfLink is a legacy read-only field that is no longer\n populated by the system.\n\n uid\t\n UID is the unique in time and space value for this object. It is typically\n generated by the server on successful creation of a resource and is not\n allowed to change on PUT operations.\n\n Populated by the system. Read-only. 
More info:\n http://kubernetes.io/docs/user-guide/identifiers#uids\n\n" - Jul 29 15:31:10.915: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds.spec' - Jul 29 15:31:11.388: INFO: stderr: "" - Jul 29 15:31:11.388: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1403-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: spec \n\nDESCRIPTION:\n Specification of Foo\n\nFIELDS:\n bars\t<[]Object>\n List of Bars and their specs.\n\n" - Jul 29 15:31:11.389: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds.spec.bars' - Jul 29 15:31:11.796: INFO: stderr: "" - Jul 29 15:31:11.796: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1403-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: bars <[]Object>\n\nDESCRIPTION:\n List of Bars and their specs.\n\nFIELDS:\n age\t\n Age of Bar.\n\n bazs\t<[]string>\n List of Bazs.\n\n feeling\t\n Whether Bar is feeling great.\n\n name\t -required-\n Name of Bar.\n\n" - STEP: kubectl explain works to return error when explain is called on property that doesn't exist 07/29/23 15:31:11.796 - Jul 29 15:31:11.796: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-3062 explain e2e-test-crd-publish-openapi-1403-crds.spec.bars2' - Jul 29 15:31:12.184: INFO: rc: 1 - [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:32 - Jul 29 15:31:14.647: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - tear down framework | framework.go:193 - STEP: Destroying namespace "crd-publish-openapi-3062" for this suite. 
07/29/23 15:31:14.663 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-api-machinery] Namespaces [Serial] - should apply changes to a namespace status [Conformance] - test/e2e/apimachinery/namespace.go:299 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + should include custom resource definition resources in discovery documents [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:198 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:31:14.681 -Jul 29 15:31:14.682: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename namespaces 07/29/23 15:31:14.684 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:14.711 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:14.717 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +STEP: Creating a kubernetes client 08/24/23 11:40:21.152 +Aug 24 11:40:21.153: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 11:40:21.156 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:21.189 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:21.193 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should apply changes to a namespace status [Conformance] - test/e2e/apimachinery/namespace.go:299 -STEP: Read namespace status 07/29/23 15:31:14.721 -Jul 29 15:31:14.727: INFO: Status: v1.NamespaceStatus{Phase:"Active", Conditions:[]v1.NamespaceCondition(nil)} -STEP: Patch namespace status 07/29/23 15:31:14.727 -Jul 29 15:31:14.739: INFO: Status.Condition: v1.NamespaceCondition{Type:"StatusPatch", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Patched by an e2e test"} -STEP: Update namespace status 07/29/23 15:31:14.739 -Jul 29 15:31:14.757: INFO: Status.Condition: v1.NamespaceCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Updated by an e2e test"} -[AfterEach] [sig-api-machinery] Namespaces [Serial] +[It] should include custom resource definition resources in discovery documents [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:198 +STEP: fetching the /apis discovery document 08/24/23 11:40:21.197 +STEP: finding the apiextensions.k8s.io API group in the /apis discovery document 08/24/23 11:40:21.199 +STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis discovery document 08/24/23 11:40:21.2 +STEP: fetching the /apis/apiextensions.k8s.io discovery document 08/24/23 11:40:21.2 +STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis/apiextensions.k8s.io discovery document 08/24/23 11:40:21.201 +STEP: fetching the /apis/apiextensions.k8s.io/v1 discovery document 08/24/23 11:40:21.202 +STEP: finding customresourcedefinitions resources in the /apis/apiextensions.k8s.io/v1 discovery document 08/24/23 11:40:21.203 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] 
test/e2e/framework/node/init/init.go:32 -Jul 29 15:31:14.757: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] +Aug 24 11:40:21.204: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "namespaces-8994" for this suite. 07/29/23 15:31:14.765 +STEP: Destroying namespace "custom-resource-definition-3968" for this suite. 08/24/23 11:40:21.211 ------------------------------ -• [0.096 seconds] -[sig-api-machinery] Namespaces [Serial] +• [0.072 seconds] +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/apimachinery/framework.go:23 - should apply changes to a namespace status [Conformance] - test/e2e/apimachinery/namespace.go:299 + should include custom resource definition resources in discovery documents [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:198 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Namespaces [Serial] + [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:31:14.681 - Jul 29 15:31:14.682: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename namespaces 07/29/23 15:31:14.684 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:14.711 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:14.717 - [BeforeEach] [sig-api-machinery] Namespaces [Serial] + STEP: Creating a kubernetes client 08/24/23 11:40:21.152 + Aug 24 11:40:21.153: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 11:40:21.156 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:21.189 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:21.193 + [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should apply changes to a namespace status [Conformance] - test/e2e/apimachinery/namespace.go:299 - STEP: Read namespace status 07/29/23 15:31:14.721 - Jul 29 15:31:14.727: INFO: Status: v1.NamespaceStatus{Phase:"Active", Conditions:[]v1.NamespaceCondition(nil)} - STEP: Patch namespace status 07/29/23 15:31:14.727 - Jul 29 15:31:14.739: INFO: Status.Condition: v1.NamespaceCondition{Type:"StatusPatch", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Patched by an e2e test"} - STEP: Update namespace status 07/29/23 15:31:14.739 - Jul 29 15:31:14.757: INFO: Status.Condition: v1.NamespaceCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Updated by an e2e test"} - 
[AfterEach] [sig-api-machinery] Namespaces [Serial] + [It] should include custom resource definition resources in discovery documents [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:198 + STEP: fetching the /apis discovery document 08/24/23 11:40:21.197 + STEP: finding the apiextensions.k8s.io API group in the /apis discovery document 08/24/23 11:40:21.199 + STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis discovery document 08/24/23 11:40:21.2 + STEP: fetching the /apis/apiextensions.k8s.io discovery document 08/24/23 11:40:21.2 + STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis/apiextensions.k8s.io discovery document 08/24/23 11:40:21.201 + STEP: fetching the /apis/apiextensions.k8s.io/v1 discovery document 08/24/23 11:40:21.202 + STEP: finding customresourcedefinitions resources in the /apis/apiextensions.k8s.io/v1 discovery document 08/24/23 11:40:21.203 + [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 15:31:14.757: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + Aug 24 11:40:21.204: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "namespaces-8994" for this suite. 07/29/23 15:31:14.765 + STEP: Destroying namespace "custom-resource-definition-3968" for this suite. 
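[editor's sketch] The discovery-document record above simply walks /apis, then /apis/apiextensions.k8s.io, then /apis/apiextensions.k8s.io/v1, and checks that the customresourcedefinitions resource is published at each level. A rough client-go equivalent of that walk, under the same KUBECONFIG assumption as the earlier sketch:

// Rough equivalent of the discovery walk in the test above; not the suite's code.
package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Step 1: the /apis document lists every API group and its versions.
	groups, err := client.Discovery().ServerGroups()
	if err != nil {
		panic(err)
	}
	for _, g := range groups.Groups {
		if g.Name == "apiextensions.k8s.io" {
			fmt.Println("found group, preferred version:", g.PreferredVersion.GroupVersion)
		}
	}

	// Step 2: the group/version document must list customresourcedefinitions.
	res, err := client.Discovery().ServerResourcesForGroupVersion("apiextensions.k8s.io/v1")
	if err != nil {
		panic(err)
	}
	for _, r := range res.APIResources {
		if r.Name == "customresourcedefinitions" {
			fmt.Println("found resource:", r.Name)
		}
	}
}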
08/24/23 11:40:21.211 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Kubelet when scheduling a busybox command that always fails in a pod - should be possible to delete [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:135 -[BeforeEach] [sig-node] Kubelet +[sig-node] Secrets + should fail to create secret due to empty secret key [Conformance] + test/e2e/common/node/secrets.go:140 +[BeforeEach] [sig-node] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:31:14.782 -Jul 29 15:31:14.782: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubelet-test 07/29/23 15:31:14.783 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:14.806 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:14.81 -[BeforeEach] [sig-node] Kubelet +STEP: Creating a kubernetes client 08/24/23 11:40:21.228 +Aug 24 11:40:21.228: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 11:40:21.229 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:21.254 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:21.258 +[BeforeEach] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 -[BeforeEach] when scheduling a busybox command that always fails in a pod - test/e2e/common/node/kubelet.go:85 -[It] should be possible to delete [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:135 -[AfterEach] [sig-node] Kubelet +[It] should fail to create secret due to empty secret key [Conformance] + test/e2e/common/node/secrets.go:140 +STEP: Creating projection with secret that has name secret-emptykey-test-8924e332-627e-4966-a6f7-3b6f225f8d8a 08/24/23 11:40:21.262 +[AfterEach] [sig-node] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 15:31:14.889: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Kubelet +Aug 24 11:40:21.265: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] [sig-node] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] [sig-node] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "kubelet-test-6085" for this suite. 07/29/23 15:31:14.896 +STEP: Destroying namespace "secrets-4205" for this suite. 
08/24/23 11:40:21.272 ------------------------------ -• [0.182 seconds] -[sig-node] Kubelet +• [0.054 seconds] +[sig-node] Secrets test/e2e/common/node/framework.go:23 - when scheduling a busybox command that always fails in a pod - test/e2e/common/node/kubelet.go:82 - should be possible to delete [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:135 + should fail to create secret due to empty secret key [Conformance] + test/e2e/common/node/secrets.go:140 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Kubelet + [BeforeEach] [sig-node] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:31:14.782 - Jul 29 15:31:14.782: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubelet-test 07/29/23 15:31:14.783 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:14.806 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:14.81 - [BeforeEach] [sig-node] Kubelet + STEP: Creating a kubernetes client 08/24/23 11:40:21.228 + Aug 24 11:40:21.228: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 11:40:21.229 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:21.254 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:21.258 + [BeforeEach] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 - [BeforeEach] when scheduling a busybox command that always fails in a pod - test/e2e/common/node/kubelet.go:85 - [It] should be possible to delete [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:135 - [AfterEach] [sig-node] Kubelet + [It] should fail to create secret due to empty secret key [Conformance] + test/e2e/common/node/secrets.go:140 + STEP: Creating projection with secret that has name secret-emptykey-test-8924e332-627e-4966-a6f7-3b6f225f8d8a 08/24/23 11:40:21.262 + [AfterEach] [sig-node] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 15:31:14.889: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Kubelet + Aug 24 11:40:21.265: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup (Each)] [sig-node] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup (Each)] [sig-node] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "kubelet-test-6085" for this suite. 07/29/23 15:31:14.896 + STEP: Destroying namespace "secrets-4205" for this suite. 
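[editor's sketch] The Secrets record above contains no pod lifecycle at all: the test only asserts that the API server rejects a Secret whose data map contains an empty key, so the whole run is a single failed create. A small sketch of that negative check, same assumptions as the sketches above (the secret name is illustrative):

// Sketch of the rejected create the test above relies on; not the suite's code.
package main

import (
	"context"
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "secret-emptykey-test"}, // illustrative name
		Data: map[string][]byte{
			"": []byte("value-1\n"), // empty key: invalid, the API server must reject it
		},
	}
	_, err = client.CoreV1().Secrets("default").Create(context.TODO(), secret, metav1.CreateOptions{})
	if err == nil {
		panic("expected the API server to reject the empty secret key")
	}
	fmt.Println("create rejected as expected:", err)
}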
08/24/23 11:40:21.272 << End Captured GinkgoWriter Output ------------------------------ -SS +S ------------------------------ -[sig-cli] Kubectl client Kubectl label - should update the label on a resource [Conformance] - test/e2e/kubectl/kubectl.go:1509 +[sig-cli] Kubectl client Kubectl replace + should update a single-container pod's image [Conformance] + test/e2e/kubectl/kubectl.go:1747 [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:31:14.967 -Jul 29 15:31:14.967: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 15:31:14.97 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:15.024 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:15.03 +STEP: Creating a kubernetes client 08/24/23 11:40:21.282 +Aug 24 11:40:21.283: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 11:40:21.285 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:21.306 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:21.31 [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-cli] Kubectl client test/e2e/kubectl/kubectl.go:274 -[BeforeEach] Kubectl label - test/e2e/kubectl/kubectl.go:1494 -STEP: creating the pod 07/29/23 15:31:15.034 -Jul 29 15:31:15.037: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 create -f -' -Jul 29 15:31:16.339: INFO: stderr: "" -Jul 29 15:31:16.339: INFO: stdout: "pod/pause created\n" -Jul 29 15:31:16.339: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause] -Jul 29 15:31:16.339: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-1467" to be "running and ready" -Jul 29 15:31:16.349: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 9.493833ms -Jul 29 15:31:16.349: INFO: Error evaluating pod condition running and ready: want pod 'pause' on 'wetuj3nuajog-3' to be 'Running' but was 'Pending' -Jul 29 15:31:18.358: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.018858751s -Jul 29 15:31:18.358: INFO: Pod "pause" satisfied condition "running and ready" -Jul 29 15:31:18.358: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [pause] -[It] should update the label on a resource [Conformance] - test/e2e/kubectl/kubectl.go:1509 -STEP: adding the label testing-label with value testing-label-value to a pod 07/29/23 15:31:18.359 -Jul 29 15:31:18.360: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 label pods pause testing-label=testing-label-value' -Jul 29 15:31:18.534: INFO: stderr: "" -Jul 29 15:31:18.534: INFO: stdout: "pod/pause labeled\n" -STEP: verifying the pod has the label testing-label with the value testing-label-value 07/29/23 15:31:18.535 -Jul 29 15:31:18.535: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 get pod pause -L testing-label' -Jul 29 15:31:18.677: INFO: stderr: "" -Jul 29 15:31:18.677: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s testing-label-value\n" -STEP: removing the label testing-label of a pod 07/29/23 15:31:18.677 -Jul 29 15:31:18.677: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 label pods pause testing-label-' -Jul 29 15:31:18.846: INFO: stderr: "" -Jul 29 15:31:18.846: INFO: stdout: "pod/pause unlabeled\n" -STEP: verifying the pod doesn't have the label testing-label 07/29/23 15:31:18.846 -Jul 29 15:31:18.847: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 get pod pause -L testing-label' -Jul 29 15:31:18.994: INFO: stderr: "" -Jul 29 15:31:18.994: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s \n" -[AfterEach] Kubectl label - test/e2e/kubectl/kubectl.go:1500 -STEP: using delete to clean up resources 07/29/23 15:31:18.995 -Jul 29 15:31:18.995: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 delete --grace-period=0 --force -f -' -Jul 29 15:31:19.156: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" -Jul 29 15:31:19.156: INFO: stdout: "pod \"pause\" force deleted\n" -Jul 29 15:31:19.157: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 get rc,svc -l name=pause --no-headers' -Jul 29 15:31:19.314: INFO: stderr: "No resources found in kubectl-1467 namespace.\n" -Jul 29 15:31:19.314: INFO: stdout: "" -Jul 29 15:31:19.314: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 get pods -l name=pause -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' -Jul 29 15:31:19.475: INFO: stderr: "" -Jul 29 15:31:19.475: INFO: stdout: "" +[BeforeEach] Kubectl replace + test/e2e/kubectl/kubectl.go:1734 +[It] should update a single-container pod's image [Conformance] + test/e2e/kubectl/kubectl.go:1747 +STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 08/24/23 11:40:21.314 +Aug 24 11:40:21.315: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6093 run e2e-test-httpd-pod --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod' +Aug 24 11:40:21.663: INFO: stderr: "" +Aug 24 11:40:21.663: INFO: stdout: "pod/e2e-test-httpd-pod created\n" +STEP: verifying the pod e2e-test-httpd-pod is running 08/24/23 11:40:21.663 +STEP: verifying the pod e2e-test-httpd-pod was created 08/24/23 11:40:31.718 +Aug 24 11:40:31.719: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6093 get pod e2e-test-httpd-pod -o json' +Aug 24 11:40:31.861: INFO: stderr: "" +Aug 24 11:40:31.861: INFO: stdout: "{\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2023-08-24T11:40:21Z\",\n \"labels\": {\n \"run\": \"e2e-test-httpd-pod\"\n },\n \"name\": \"e2e-test-httpd-pod\",\n \"namespace\": \"kubectl-6093\",\n \"resourceVersion\": \"3828\",\n \"uid\": \"f98da024-0669-45a1-9d3c-f59305a59de6\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"e2e-test-httpd-pod\",\n \"resources\": {},\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-slqvb\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"ClusterFirst\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"pe9deep4seen-3\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 30,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-slqvb\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n 
{\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-08-24T11:40:21Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-08-24T11:40:31Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-08-24T11:40:31Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-08-24T11:40:21Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"cri-o://361aac20fcc4b35fad82a28383a1c9f4914a894be0238bf4d906c21dd2523ccb\",\n \"image\": \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\",\n \"imageID\": \"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22\",\n \"lastState\": {},\n \"name\": \"e2e-test-httpd-pod\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2023-08-24T11:40:30Z\"\n }\n }\n }\n ],\n \"hostIP\": \"192.168.121.130\",\n \"phase\": \"Running\",\n \"podIP\": \"10.233.66.206\",\n \"podIPs\": [\n {\n \"ip\": \"10.233.66.206\"\n }\n ],\n \"qosClass\": \"BestEffort\",\n \"startTime\": \"2023-08-24T11:40:21Z\"\n }\n}\n" +STEP: replace the image in the pod 08/24/23 11:40:31.862 +Aug 24 11:40:31.862: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6093 replace -f -' +Aug 24 11:40:32.970: INFO: stderr: "" +Aug 24 11:40:32.970: INFO: stdout: "pod/e2e-test-httpd-pod replaced\n" +STEP: verifying the pod e2e-test-httpd-pod has the right image registry.k8s.io/e2e-test-images/busybox:1.29-4 08/24/23 11:40:32.97 +[AfterEach] Kubectl replace + test/e2e/kubectl/kubectl.go:1738 +Aug 24 11:40:32.980: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6093 delete pods e2e-test-httpd-pod' +Aug 24 11:40:36.675: INFO: stderr: "" +Aug 24 11:40:36.675: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 15:31:19.475: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 11:40:36.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-1467" for this suite. 07/29/23 15:31:19.487 +STEP: Destroying namespace "kubectl-6093" for this suite. 
08/24/23 11:40:36.689 ------------------------------ -• [4.534 seconds] +• [SLOW TEST] [15.423 seconds] [sig-cli] Kubectl client test/e2e/kubectl/framework.go:23 - Kubectl label - test/e2e/kubectl/kubectl.go:1492 - should update the label on a resource [Conformance] - test/e2e/kubectl/kubectl.go:1509 + Kubectl replace + test/e2e/kubectl/kubectl.go:1731 + should update a single-container pod's image [Conformance] + test/e2e/kubectl/kubectl.go:1747 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:31:14.967 - Jul 29 15:31:14.967: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 15:31:14.97 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:15.024 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:15.03 + STEP: Creating a kubernetes client 08/24/23 11:40:21.282 + Aug 24 11:40:21.283: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 11:40:21.285 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:21.306 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:21.31 [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-cli] Kubectl client test/e2e/kubectl/kubectl.go:274 - [BeforeEach] Kubectl label - test/e2e/kubectl/kubectl.go:1494 - STEP: creating the pod 07/29/23 15:31:15.034 - Jul 29 15:31:15.037: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 create -f -' - Jul 29 15:31:16.339: INFO: stderr: "" - Jul 29 15:31:16.339: INFO: stdout: "pod/pause created\n" - Jul 29 15:31:16.339: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause] - Jul 29 15:31:16.339: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-1467" to be "running and ready" - Jul 29 15:31:16.349: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 9.493833ms - Jul 29 15:31:16.349: INFO: Error evaluating pod condition running and ready: want pod 'pause' on 'wetuj3nuajog-3' to be 'Running' but was 'Pending' - Jul 29 15:31:18.358: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.018858751s - Jul 29 15:31:18.358: INFO: Pod "pause" satisfied condition "running and ready" - Jul 29 15:31:18.358: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [pause] - [It] should update the label on a resource [Conformance] - test/e2e/kubectl/kubectl.go:1509 - STEP: adding the label testing-label with value testing-label-value to a pod 07/29/23 15:31:18.359 - Jul 29 15:31:18.360: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 label pods pause testing-label=testing-label-value' - Jul 29 15:31:18.534: INFO: stderr: "" - Jul 29 15:31:18.534: INFO: stdout: "pod/pause labeled\n" - STEP: verifying the pod has the label testing-label with the value testing-label-value 07/29/23 15:31:18.535 - Jul 29 15:31:18.535: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 get pod pause -L testing-label' - Jul 29 15:31:18.677: INFO: stderr: "" - Jul 29 15:31:18.677: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s testing-label-value\n" - STEP: removing the label testing-label of a pod 07/29/23 15:31:18.677 - Jul 29 15:31:18.677: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 label pods pause testing-label-' - Jul 29 15:31:18.846: INFO: stderr: "" - Jul 29 15:31:18.846: INFO: stdout: "pod/pause unlabeled\n" - STEP: verifying the pod doesn't have the label testing-label 07/29/23 15:31:18.846 - Jul 29 15:31:18.847: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 get pod pause -L testing-label' - Jul 29 15:31:18.994: INFO: stderr: "" - Jul 29 15:31:18.994: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s \n" - [AfterEach] Kubectl label - test/e2e/kubectl/kubectl.go:1500 - STEP: using delete to clean up resources 07/29/23 15:31:18.995 - Jul 29 15:31:18.995: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 delete --grace-period=0 --force -f -' - Jul 29 15:31:19.156: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" - Jul 29 15:31:19.156: INFO: stdout: "pod \"pause\" force deleted\n" - Jul 29 15:31:19.157: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 get rc,svc -l name=pause --no-headers' - Jul 29 15:31:19.314: INFO: stderr: "No resources found in kubectl-1467 namespace.\n" - Jul 29 15:31:19.314: INFO: stdout: "" - Jul 29 15:31:19.314: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1467 get pods -l name=pause -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' - Jul 29 15:31:19.475: INFO: stderr: "" - Jul 29 15:31:19.475: INFO: stdout: "" + [BeforeEach] Kubectl replace + test/e2e/kubectl/kubectl.go:1734 + [It] should update a single-container pod's image [Conformance] + test/e2e/kubectl/kubectl.go:1747 + STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 08/24/23 11:40:21.314 + Aug 24 11:40:21.315: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6093 run e2e-test-httpd-pod --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod' + Aug 24 11:40:21.663: INFO: stderr: "" + Aug 24 11:40:21.663: INFO: stdout: "pod/e2e-test-httpd-pod created\n" + STEP: verifying the pod e2e-test-httpd-pod is running 08/24/23 11:40:21.663 + STEP: verifying the pod e2e-test-httpd-pod was created 08/24/23 11:40:31.718 + Aug 24 11:40:31.719: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6093 get pod e2e-test-httpd-pod -o json' + Aug 24 11:40:31.861: INFO: stderr: "" + Aug 24 11:40:31.861: INFO: stdout: "{\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2023-08-24T11:40:21Z\",\n \"labels\": {\n \"run\": \"e2e-test-httpd-pod\"\n },\n \"name\": \"e2e-test-httpd-pod\",\n \"namespace\": \"kubectl-6093\",\n \"resourceVersion\": \"3828\",\n \"uid\": \"f98da024-0669-45a1-9d3c-f59305a59de6\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"e2e-test-httpd-pod\",\n \"resources\": {},\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-slqvb\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"ClusterFirst\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"pe9deep4seen-3\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 30,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-slqvb\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n 
\"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-08-24T11:40:21Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-08-24T11:40:31Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-08-24T11:40:31Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-08-24T11:40:21Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"cri-o://361aac20fcc4b35fad82a28383a1c9f4914a894be0238bf4d906c21dd2523ccb\",\n \"image\": \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\",\n \"imageID\": \"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22\",\n \"lastState\": {},\n \"name\": \"e2e-test-httpd-pod\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2023-08-24T11:40:30Z\"\n }\n }\n }\n ],\n \"hostIP\": \"192.168.121.130\",\n \"phase\": \"Running\",\n \"podIP\": \"10.233.66.206\",\n \"podIPs\": [\n {\n \"ip\": \"10.233.66.206\"\n }\n ],\n \"qosClass\": \"BestEffort\",\n \"startTime\": \"2023-08-24T11:40:21Z\"\n }\n}\n" + STEP: replace the image in the pod 08/24/23 11:40:31.862 + Aug 24 11:40:31.862: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6093 replace -f -' + Aug 24 11:40:32.970: INFO: stderr: "" + Aug 24 11:40:32.970: INFO: stdout: "pod/e2e-test-httpd-pod replaced\n" + STEP: verifying the pod e2e-test-httpd-pod has the right image registry.k8s.io/e2e-test-images/busybox:1.29-4 08/24/23 11:40:32.97 + [AfterEach] Kubectl replace + test/e2e/kubectl/kubectl.go:1738 + Aug 24 11:40:32.980: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6093 delete pods e2e-test-httpd-pod' + Aug 24 11:40:36.675: INFO: stderr: "" + Aug 24 11:40:36.675: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 15:31:19.475: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 11:40:36.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-1467" for this suite. 07/29/23 15:31:19.487 + STEP: Destroying namespace "kubectl-6093" for this suite. 
08/24/23 11:40:36.689 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:174 -[BeforeEach] [sig-storage] Projected configMap +[sig-apps] ControllerRevision [Serial] + should manage the lifecycle of a ControllerRevision [Conformance] + test/e2e/apps/controller_revision.go:124 +[BeforeEach] [sig-apps] ControllerRevision [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:31:19.501 -Jul 29 15:31:19.501: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 15:31:19.503 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:19.529 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:19.536 -[BeforeEach] [sig-storage] Projected configMap +STEP: Creating a kubernetes client 08/24/23 11:40:36.709 +Aug 24 11:40:36.709: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename controllerrevisions 08/24/23 11:40:36.712 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:36.741 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:36.746 +[BeforeEach] [sig-apps] ControllerRevision [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:174 -STEP: Creating configMap with name cm-test-opt-del-050c8b0f-1be3-46fd-bccc-233f83d28d52 07/29/23 15:31:19.548 -STEP: Creating configMap with name cm-test-opt-upd-9c6b490e-1cfe-4fca-b1a6-0b0bc5ff7a43 07/29/23 15:31:19.558 -STEP: Creating the pod 07/29/23 15:31:19.564 -Jul 29 15:31:19.593: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14" in namespace "projected-7827" to be "running and ready" -Jul 29 15:31:19.609: INFO: Pod "pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14": Phase="Pending", Reason="", readiness=false. Elapsed: 15.529086ms -Jul 29 15:31:19.610: INFO: The phase of Pod pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:31:21.618: INFO: Pod "pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.024559758s -Jul 29 15:31:21.618: INFO: The phase of Pod pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14 is Running (Ready = true) -Jul 29 15:31:21.618: INFO: Pod "pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14" satisfied condition "running and ready" -STEP: Deleting configmap cm-test-opt-del-050c8b0f-1be3-46fd-bccc-233f83d28d52 07/29/23 15:31:21.666 -STEP: Updating configmap cm-test-opt-upd-9c6b490e-1cfe-4fca-b1a6-0b0bc5ff7a43 07/29/23 15:31:21.69 -STEP: Creating configMap with name cm-test-opt-create-ddbeb5e7-ab5c-4247-ad99-27ff6155fb95 07/29/23 15:31:21.701 -STEP: waiting to observe update in volume 07/29/23 15:31:21.709 -[AfterEach] [sig-storage] Projected configMap +[BeforeEach] [sig-apps] ControllerRevision [Serial] + test/e2e/apps/controller_revision.go:93 +[It] should manage the lifecycle of a ControllerRevision [Conformance] + test/e2e/apps/controller_revision.go:124 +STEP: Creating DaemonSet "e2e-xww5l-daemon-set" 08/24/23 11:40:36.796 +STEP: Check that daemon pods launch on every node of the cluster. 08/24/23 11:40:36.81 +Aug 24 11:40:36.827: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 0 +Aug 24 11:40:36.827: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:37.859: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 0 +Aug 24 11:40:37.859: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:38.842: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 +Aug 24 11:40:38.843: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:39.848: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 +Aug 24 11:40:39.848: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:40.845: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 +Aug 24 11:40:40.846: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:41.844: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 +Aug 24 11:40:41.844: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:42.861: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 +Aug 24 11:40:42.861: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:43.844: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 +Aug 24 11:40:43.844: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:44.851: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 +Aug 24 11:40:44.851: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:45.845: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 +Aug 24 11:40:45.845: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:46.841: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:46.842: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:47.851: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:47.851: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:48.842: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 
+Aug 24 11:40:48.842: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:49.847: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:49.847: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:50.864: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:50.864: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:51.842: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:51.842: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:52.853: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:52.853: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:53.844: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:53.844: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:54.846: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:54.846: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:55.846: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:55.846: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:56.843: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 +Aug 24 11:40:56.843: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:40:57.843: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 3 +Aug 24 11:40:57.843: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset e2e-xww5l-daemon-set +STEP: Confirm DaemonSet "e2e-xww5l-daemon-set" successfully created with "daemonset-name=e2e-xww5l-daemon-set" label 08/24/23 11:40:57.851 +STEP: Listing all ControllerRevisions with label "daemonset-name=e2e-xww5l-daemon-set" 08/24/23 11:40:57.865 +Aug 24 11:40:57.870: INFO: Located ControllerRevision: "e2e-xww5l-daemon-set-7f6fbc98bd" +STEP: Patching ControllerRevision "e2e-xww5l-daemon-set-7f6fbc98bd" 08/24/23 11:40:57.875 +Aug 24 11:40:57.920: INFO: e2e-xww5l-daemon-set-7f6fbc98bd has been patched +STEP: Create a new ControllerRevision 08/24/23 11:40:57.92 +Aug 24 11:40:57.931: INFO: Created ControllerRevision: e2e-xww5l-daemon-set-677f49f974 +STEP: Confirm that there are two ControllerRevisions 08/24/23 11:40:57.931 +Aug 24 11:40:57.932: INFO: Requesting list of ControllerRevisions to confirm quantity +Aug 24 11:40:57.939: INFO: Found 2 ControllerRevisions +STEP: Deleting ControllerRevision "e2e-xww5l-daemon-set-7f6fbc98bd" 08/24/23 11:40:57.939 +STEP: Confirm that there is only one ControllerRevision 08/24/23 11:40:57.953 +Aug 24 11:40:57.953: INFO: Requesting list of ControllerRevisions to confirm quantity +Aug 24 11:40:57.959: INFO: Found 1 ControllerRevisions +STEP: Updating ControllerRevision "e2e-xww5l-daemon-set-677f49f974" 08/24/23 11:40:57.964 +Aug 24 11:40:57.978: INFO: e2e-xww5l-daemon-set-677f49f974 has been updated +STEP: Generate another ControllerRevision by patching the Daemonset 08/24/23 11:40:57.978 +W0824 11:40:57.989363 14 warnings.go:70] unknown field "updateStrategy" +STEP: Confirm that there are two ControllerRevisions 08/24/23 11:40:57.989 +Aug 24 11:40:57.989: INFO: Requesting list of ControllerRevisions to 
confirm quantity +Aug 24 11:40:58.999: INFO: Requesting list of ControllerRevisions to confirm quantity +Aug 24 11:40:59.009: INFO: Found 2 ControllerRevisions +STEP: Removing a ControllerRevision via 'DeleteCollection' with labelSelector: "e2e-xww5l-daemon-set-677f49f974=updated" 08/24/23 11:40:59.009 +STEP: Confirm that there is only one ControllerRevision 08/24/23 11:40:59.021 +Aug 24 11:40:59.022: INFO: Requesting list of ControllerRevisions to confirm quantity +Aug 24 11:40:59.027: INFO: Found 1 ControllerRevisions +Aug 24 11:40:59.032: INFO: ControllerRevision "e2e-xww5l-daemon-set-854b796487" has revision 3 +[AfterEach] [sig-apps] ControllerRevision [Serial] + test/e2e/apps/controller_revision.go:58 +STEP: Deleting DaemonSet "e2e-xww5l-daemon-set" 08/24/23 11:40:59.037 +STEP: deleting DaemonSet.extensions e2e-xww5l-daemon-set in namespace controllerrevisions-7151, will wait for the garbage collector to delete the pods 08/24/23 11:40:59.037 +Aug 24 11:40:59.109: INFO: Deleting DaemonSet.extensions e2e-xww5l-daemon-set took: 13.920995ms +Aug 24 11:40:59.329: INFO: Terminating DaemonSet.extensions e2e-xww5l-daemon-set pods took: 220.25128ms +Aug 24 11:41:01.836: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 0 +Aug 24 11:41:01.836: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset e2e-xww5l-daemon-set +Aug 24 11:41:01.844: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"4014"},"items":null} + +Aug 24 11:41:01.850: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"4014"},"items":null} + +[AfterEach] [sig-apps] ControllerRevision [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 15:31:23.756: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected configMap +Aug 24 11:41:01.873: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "projected-7827" for this suite. 07/29/23 15:31:23.764 +STEP: Destroying namespace "controllerrevisions-7151" for this suite. 
08/24/23 11:41:01.879 ------------------------------ -• [4.277 seconds] -[sig-storage] Projected configMap -test/e2e/common/storage/framework.go:23 - optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:174 +• [SLOW TEST] [25.182 seconds] +[sig-apps] ControllerRevision [Serial] +test/e2e/apps/framework.go:23 + should manage the lifecycle of a ControllerRevision [Conformance] + test/e2e/apps/controller_revision.go:124 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected configMap + [BeforeEach] [sig-apps] ControllerRevision [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:31:19.501 - Jul 29 15:31:19.501: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 15:31:19.503 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:19.529 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:19.536 - [BeforeEach] [sig-storage] Projected configMap + STEP: Creating a kubernetes client 08/24/23 11:40:36.709 + Aug 24 11:40:36.709: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename controllerrevisions 08/24/23 11:40:36.712 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:40:36.741 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:40:36.746 + [BeforeEach] [sig-apps] ControllerRevision [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:174 - STEP: Creating configMap with name cm-test-opt-del-050c8b0f-1be3-46fd-bccc-233f83d28d52 07/29/23 15:31:19.548 - STEP: Creating configMap with name cm-test-opt-upd-9c6b490e-1cfe-4fca-b1a6-0b0bc5ff7a43 07/29/23 15:31:19.558 - STEP: Creating the pod 07/29/23 15:31:19.564 - Jul 29 15:31:19.593: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14" in namespace "projected-7827" to be "running and ready" - Jul 29 15:31:19.609: INFO: Pod "pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14": Phase="Pending", Reason="", readiness=false. Elapsed: 15.529086ms - Jul 29 15:31:19.610: INFO: The phase of Pod pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:31:21.618: INFO: Pod "pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.024559758s - Jul 29 15:31:21.618: INFO: The phase of Pod pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14 is Running (Ready = true) - Jul 29 15:31:21.618: INFO: Pod "pod-projected-configmaps-935b8cdf-c8ec-4dc4-ac5a-fffaeb703d14" satisfied condition "running and ready" - STEP: Deleting configmap cm-test-opt-del-050c8b0f-1be3-46fd-bccc-233f83d28d52 07/29/23 15:31:21.666 - STEP: Updating configmap cm-test-opt-upd-9c6b490e-1cfe-4fca-b1a6-0b0bc5ff7a43 07/29/23 15:31:21.69 - STEP: Creating configMap with name cm-test-opt-create-ddbeb5e7-ab5c-4247-ad99-27ff6155fb95 07/29/23 15:31:21.701 - STEP: waiting to observe update in volume 07/29/23 15:31:21.709 - [AfterEach] [sig-storage] Projected configMap + [BeforeEach] [sig-apps] ControllerRevision [Serial] + test/e2e/apps/controller_revision.go:93 + [It] should manage the lifecycle of a ControllerRevision [Conformance] + test/e2e/apps/controller_revision.go:124 + STEP: Creating DaemonSet "e2e-xww5l-daemon-set" 08/24/23 11:40:36.796 + STEP: Check that daemon pods launch on every node of the cluster. 08/24/23 11:40:36.81 + Aug 24 11:40:36.827: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 0 + Aug 24 11:40:36.827: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:37.859: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 0 + Aug 24 11:40:37.859: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:38.842: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 + Aug 24 11:40:38.843: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:39.848: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 + Aug 24 11:40:39.848: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:40.845: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 + Aug 24 11:40:40.846: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:41.844: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 + Aug 24 11:40:41.844: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:42.861: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 + Aug 24 11:40:42.861: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:43.844: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 + Aug 24 11:40:43.844: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:44.851: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 + Aug 24 11:40:44.851: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:45.845: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 1 + Aug 24 11:40:45.845: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:46.841: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:46.842: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:47.851: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:47.851: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:48.842: INFO: Number of nodes with available pods controlled by 
daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:48.842: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:49.847: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:49.847: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:50.864: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:50.864: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:51.842: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:51.842: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:52.853: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:52.853: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:53.844: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:53.844: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:54.846: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:54.846: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:55.846: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:55.846: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:56.843: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 2 + Aug 24 11:40:56.843: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 11:40:57.843: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 3 + Aug 24 11:40:57.843: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset e2e-xww5l-daemon-set + STEP: Confirm DaemonSet "e2e-xww5l-daemon-set" successfully created with "daemonset-name=e2e-xww5l-daemon-set" label 08/24/23 11:40:57.851 + STEP: Listing all ControllerRevisions with label "daemonset-name=e2e-xww5l-daemon-set" 08/24/23 11:40:57.865 + Aug 24 11:40:57.870: INFO: Located ControllerRevision: "e2e-xww5l-daemon-set-7f6fbc98bd" + STEP: Patching ControllerRevision "e2e-xww5l-daemon-set-7f6fbc98bd" 08/24/23 11:40:57.875 + Aug 24 11:40:57.920: INFO: e2e-xww5l-daemon-set-7f6fbc98bd has been patched + STEP: Create a new ControllerRevision 08/24/23 11:40:57.92 + Aug 24 11:40:57.931: INFO: Created ControllerRevision: e2e-xww5l-daemon-set-677f49f974 + STEP: Confirm that there are two ControllerRevisions 08/24/23 11:40:57.931 + Aug 24 11:40:57.932: INFO: Requesting list of ControllerRevisions to confirm quantity + Aug 24 11:40:57.939: INFO: Found 2 ControllerRevisions + STEP: Deleting ControllerRevision "e2e-xww5l-daemon-set-7f6fbc98bd" 08/24/23 11:40:57.939 + STEP: Confirm that there is only one ControllerRevision 08/24/23 11:40:57.953 + Aug 24 11:40:57.953: INFO: Requesting list of ControllerRevisions to confirm quantity + Aug 24 11:40:57.959: INFO: Found 1 ControllerRevisions + STEP: Updating ControllerRevision "e2e-xww5l-daemon-set-677f49f974" 08/24/23 11:40:57.964 + Aug 24 11:40:57.978: INFO: e2e-xww5l-daemon-set-677f49f974 has been updated + STEP: Generate another ControllerRevision by patching the Daemonset 08/24/23 11:40:57.978 + W0824 11:40:57.989363 14 warnings.go:70] unknown field "updateStrategy" + STEP: Confirm that there are two ControllerRevisions 08/24/23 11:40:57.989 + 
Aug 24 11:40:57.989: INFO: Requesting list of ControllerRevisions to confirm quantity + Aug 24 11:40:58.999: INFO: Requesting list of ControllerRevisions to confirm quantity + Aug 24 11:40:59.009: INFO: Found 2 ControllerRevisions + STEP: Removing a ControllerRevision via 'DeleteCollection' with labelSelector: "e2e-xww5l-daemon-set-677f49f974=updated" 08/24/23 11:40:59.009 + STEP: Confirm that there is only one ControllerRevision 08/24/23 11:40:59.021 + Aug 24 11:40:59.022: INFO: Requesting list of ControllerRevisions to confirm quantity + Aug 24 11:40:59.027: INFO: Found 1 ControllerRevisions + Aug 24 11:40:59.032: INFO: ControllerRevision "e2e-xww5l-daemon-set-854b796487" has revision 3 + [AfterEach] [sig-apps] ControllerRevision [Serial] + test/e2e/apps/controller_revision.go:58 + STEP: Deleting DaemonSet "e2e-xww5l-daemon-set" 08/24/23 11:40:59.037 + STEP: deleting DaemonSet.extensions e2e-xww5l-daemon-set in namespace controllerrevisions-7151, will wait for the garbage collector to delete the pods 08/24/23 11:40:59.037 + Aug 24 11:40:59.109: INFO: Deleting DaemonSet.extensions e2e-xww5l-daemon-set took: 13.920995ms + Aug 24 11:40:59.329: INFO: Terminating DaemonSet.extensions e2e-xww5l-daemon-set pods took: 220.25128ms + Aug 24 11:41:01.836: INFO: Number of nodes with available pods controlled by daemonset e2e-xww5l-daemon-set: 0 + Aug 24 11:41:01.836: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset e2e-xww5l-daemon-set + Aug 24 11:41:01.844: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"4014"},"items":null} + + Aug 24 11:41:01.850: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"4014"},"items":null} + + [AfterEach] [sig-apps] ControllerRevision [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 15:31:23.756: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected configMap + Aug 24 11:41:01.873: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "projected-7827" for this suite. 07/29/23 15:31:23.764 + STEP: Destroying namespace "controllerrevisions-7151" for this suite. 
08/24/23 11:41:01.879 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] EndpointSlice - should support creating EndpointSlice API operations [Conformance] - test/e2e/network/endpointslice.go:353 -[BeforeEach] [sig-network] EndpointSlice +[sig-storage] CSIStorageCapacity + should support CSIStorageCapacities API operations [Conformance] + test/e2e/storage/csistoragecapacity.go:49 +[BeforeEach] [sig-storage] CSIStorageCapacity set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:31:23.782 -Jul 29 15:31:23.782: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename endpointslice 07/29/23 15:31:23.784 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:23.805 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:23.81 -[BeforeEach] [sig-network] EndpointSlice +STEP: Creating a kubernetes client 08/24/23 11:41:01.9 +Aug 24 11:41:01.900: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename csistoragecapacity 08/24/23 11:41:01.902 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:01.929 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:01.935 +[BeforeEach] [sig-storage] CSIStorageCapacity test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] EndpointSlice - test/e2e/network/endpointslice.go:52 -[It] should support creating EndpointSlice API operations [Conformance] - test/e2e/network/endpointslice.go:353 -STEP: getting /apis 07/29/23 15:31:23.814 -STEP: getting /apis/discovery.k8s.io 07/29/23 15:31:23.818 -STEP: getting /apis/discovery.k8s.iov1 07/29/23 15:31:23.821 -STEP: creating 07/29/23 15:31:23.822 -STEP: getting 07/29/23 15:31:23.867 -STEP: listing 07/29/23 15:31:23.878 -STEP: watching 07/29/23 15:31:23.884 -Jul 29 15:31:23.885: INFO: starting watch -STEP: cluster-wide listing 07/29/23 15:31:23.887 -STEP: cluster-wide watching 07/29/23 15:31:23.897 -Jul 29 15:31:23.897: INFO: starting watch -STEP: patching 07/29/23 15:31:23.9 -STEP: updating 07/29/23 15:31:23.933 -Jul 29 15:31:23.951: INFO: waiting for watch events with expected annotations -Jul 29 15:31:23.951: INFO: saw patched and updated annotations -STEP: deleting 07/29/23 15:31:23.951 -STEP: deleting a collection 07/29/23 15:31:23.975 -[AfterEach] [sig-network] EndpointSlice - test/e2e/framework/node/init/init.go:32 -Jul 29 15:31:24.006: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] EndpointSlice - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] EndpointSlice - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] EndpointSlice - tear down framework | framework.go:193 -STEP: Destroying namespace "endpointslice-4021" for this suite. 
07/29/23 15:31:24.014 ------------------------------- -• [0.244 seconds] -[sig-network] EndpointSlice -test/e2e/network/common/framework.go:23 - should support creating EndpointSlice API operations [Conformance] - test/e2e/network/endpointslice.go:353 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] EndpointSlice - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:31:23.782 - Jul 29 15:31:23.782: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename endpointslice 07/29/23 15:31:23.784 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:23.805 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:23.81 - [BeforeEach] [sig-network] EndpointSlice - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] EndpointSlice - test/e2e/network/endpointslice.go:52 - [It] should support creating EndpointSlice API operations [Conformance] - test/e2e/network/endpointslice.go:353 - STEP: getting /apis 07/29/23 15:31:23.814 - STEP: getting /apis/discovery.k8s.io 07/29/23 15:31:23.818 - STEP: getting /apis/discovery.k8s.iov1 07/29/23 15:31:23.821 - STEP: creating 07/29/23 15:31:23.822 - STEP: getting 07/29/23 15:31:23.867 - STEP: listing 07/29/23 15:31:23.878 - STEP: watching 07/29/23 15:31:23.884 - Jul 29 15:31:23.885: INFO: starting watch - STEP: cluster-wide listing 07/29/23 15:31:23.887 - STEP: cluster-wide watching 07/29/23 15:31:23.897 - Jul 29 15:31:23.897: INFO: starting watch - STEP: patching 07/29/23 15:31:23.9 - STEP: updating 07/29/23 15:31:23.933 - Jul 29 15:31:23.951: INFO: waiting for watch events with expected annotations - Jul 29 15:31:23.951: INFO: saw patched and updated annotations - STEP: deleting 07/29/23 15:31:23.951 - STEP: deleting a collection 07/29/23 15:31:23.975 - [AfterEach] [sig-network] EndpointSlice - test/e2e/framework/node/init/init.go:32 - Jul 29 15:31:24.006: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] EndpointSlice - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] EndpointSlice - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] EndpointSlice - tear down framework | framework.go:193 - STEP: Destroying namespace "endpointslice-4021" for this suite. 07/29/23 15:31:24.014 - << End Captured GinkgoWriter Output ------------------------------- -SSSSS ------------------------------- -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a replication controller. [Conformance] - test/e2e/apimachinery/resource_quota.go:392 -[BeforeEach] [sig-api-machinery] ResourceQuota - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:31:24.03 -Jul 29 15:31:24.031: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 15:31:24.033 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:24.066 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:24.071 -[BeforeEach] [sig-api-machinery] ResourceQuota - test/e2e/framework/metrics/init/init.go:31 -[It] should create a ResourceQuota and capture the life of a replication controller. 
[Conformance]
- test/e2e/apimachinery/resource_quota.go:392
-STEP: Counting existing ResourceQuota 07/29/23 15:31:24.075
-STEP: Creating a ResourceQuota 07/29/23 15:31:29.084
-STEP: Ensuring resource quota status is calculated 07/29/23 15:31:29.095
-STEP: Creating a ReplicationController 07/29/23 15:31:31.104
-STEP: Ensuring resource quota status captures replication controller creation 07/29/23 15:31:31.131
-STEP: Deleting a ReplicationController 07/29/23 15:31:33.14
-STEP: Ensuring resource quota status released usage 07/29/23 15:31:33.155
-[AfterEach] [sig-api-machinery] ResourceQuota
- test/e2e/framework/node/init/init.go:32
-Jul 29 15:31:35.163: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota
- test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota
- dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota
- tear down framework | framework.go:193
-STEP: Destroying namespace "resourcequota-1160" for this suite. 07/29/23 15:31:35.174
-------------------------------
-• [SLOW TEST] [11.159 seconds]
-[sig-api-machinery] ResourceQuota
-test/e2e/apimachinery/framework.go:23
- should create a ResourceQuota and capture the life of a replication controller. [Conformance]
- test/e2e/apimachinery/resource_quota.go:392
-
- Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-api-machinery] ResourceQuota
- set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 15:31:24.03
- Jul 29 15:31:24.031: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename resourcequota 07/29/23 15:31:24.033
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:24.066
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:24.071
- [BeforeEach] [sig-api-machinery] ResourceQuota
- test/e2e/framework/metrics/init/init.go:31
- [It] should create a ResourceQuota and capture the life of a replication controller. [Conformance]
- test/e2e/apimachinery/resource_quota.go:392
- STEP: Counting existing ResourceQuota 07/29/23 15:31:24.075
- STEP: Creating a ResourceQuota 07/29/23 15:31:29.084
- STEP: Ensuring resource quota status is calculated 07/29/23 15:31:29.095
- STEP: Creating a ReplicationController 07/29/23 15:31:31.104
- STEP: Ensuring resource quota status captures replication controller creation 07/29/23 15:31:31.131
- STEP: Deleting a ReplicationController 07/29/23 15:31:33.14
- STEP: Ensuring resource quota status released usage 07/29/23 15:31:33.155
- [AfterEach] [sig-api-machinery] ResourceQuota
- test/e2e/framework/node/init/init.go:32
- Jul 29 15:31:35.163: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota
- test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota
- dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota
- tear down framework | framework.go:193
- STEP: Destroying namespace "resourcequota-1160" for this suite. 07/29/23 15:31:35.174
- << End Captured GinkgoWriter Output
-------------------------------
-SSS
-------------------------------
-[sig-storage] Secrets
- should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:89
-[BeforeEach] [sig-storage] Secrets
- set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 15:31:35.191
-Jul 29 15:31:35.191: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename secrets 07/29/23 15:31:35.195
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:35.236
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:35.241
-[BeforeEach] [sig-storage] Secrets
- test/e2e/framework/metrics/init/init.go:31
-[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:89
-STEP: Creating secret with name secret-test-map-dc72b5ad-b0bf-44e5-990b-29b964c1ef76 07/29/23 15:31:35.246
-STEP: Creating a pod to test consume secrets 07/29/23 15:31:35.257
-Jul 29 15:31:35.274: INFO: Waiting up to 5m0s for pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1" in namespace "secrets-7429" to be "Succeeded or Failed"
-Jul 29 15:31:35.280: INFO: Pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1": Phase="Pending", Reason="", readiness=false. Elapsed: 5.525416ms
-Jul 29 15:31:37.290: INFO: Pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015536231s
-Jul 29 15:31:39.292: INFO: Pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.0175727s
-STEP: Saw pod success 07/29/23 15:31:39.292
-Jul 29 15:31:39.292: INFO: Pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1" satisfied condition "Succeeded or Failed"
-Jul 29 15:31:39.300: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1 container secret-volume-test:
-STEP: delete the pod 07/29/23 15:31:39.315
-Jul 29 15:31:39.339: INFO: Waiting for pod pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1 to disappear
-Jul 29 15:31:39.344: INFO: Pod pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1 no longer exists
-[AfterEach] [sig-storage] Secrets
+[It] should support CSIStorageCapacities API operations [Conformance]
+ test/e2e/storage/csistoragecapacity.go:49
+STEP: getting /apis 08/24/23 11:41:01.938
+STEP: getting /apis/storage.k8s.io 08/24/23 11:41:01.942
+STEP: getting /apis/storage.k8s.io/v1 08/24/23 11:41:01.944
+STEP: creating 08/24/23 11:41:01.946
+STEP: watching 08/24/23 11:41:01.974
+Aug 24 11:41:01.975: INFO: starting watch
+STEP: getting 08/24/23 11:41:01.985
+STEP: listing in namespace 08/24/23 11:41:01.989
+STEP: listing across namespaces 08/24/23 11:41:01.994
+STEP: patching 08/24/23 11:41:01.999
+STEP: updating 08/24/23 11:41:02.007
+Aug 24 11:41:02.015: INFO: waiting for watch events with expected annotations in namespace
+Aug 24 11:41:02.015: INFO: waiting for watch events with expected annotations across namespace
+STEP: deleting 08/24/23 11:41:02.015
+STEP: deleting a collection 08/24/23 11:41:02.036
+[AfterEach] [sig-storage] CSIStorageCapacity
 test/e2e/framework/node/init/init.go:32
-Jul 29 15:31:39.344: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-storage] Secrets
+Aug 24 11:41:02.062: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-storage] Secrets
+[DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-storage] Secrets
+[DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
 tear down framework | framework.go:193
-STEP: Destroying namespace "secrets-7429" for this suite. 07/29/23 15:31:39.353
+STEP: Destroying namespace "csistoragecapacity-6038" for this suite. 08/24/23 11:41:02.074
 ------------------------------
-• [4.177 seconds]
-[sig-storage] Secrets
-test/e2e/common/storage/framework.go:23
- should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:89
+• [0.201 seconds]
+[sig-storage] CSIStorageCapacity
+test/e2e/storage/utils/framework.go:23
+ should support CSIStorageCapacities API operations [Conformance]
+ test/e2e/storage/csistoragecapacity.go:49
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-storage] Secrets
+ [BeforeEach] [sig-storage] CSIStorageCapacity
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 15:31:35.191
- Jul 29 15:31:35.191: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename secrets 07/29/23 15:31:35.195
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:35.236
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:35.241
- [BeforeEach] [sig-storage] Secrets
+ STEP: Creating a kubernetes client 08/24/23 11:41:01.9
+ Aug 24 11:41:01.900: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename csistoragecapacity 08/24/23 11:41:01.902
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:01.929
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:01.935
+ [BeforeEach] [sig-storage] CSIStorageCapacity
 test/e2e/framework/metrics/init/init.go:31
- [It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:89
- STEP: Creating secret with name secret-test-map-dc72b5ad-b0bf-44e5-990b-29b964c1ef76 07/29/23 15:31:35.246
- STEP: Creating a pod to test consume secrets 07/29/23 15:31:35.257
- Jul 29 15:31:35.274: INFO: Waiting up to 5m0s for pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1" in namespace "secrets-7429" to be "Succeeded or Failed"
- Jul 29 15:31:35.280: INFO: Pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1": Phase="Pending", Reason="", readiness=false. Elapsed: 5.525416ms
- Jul 29 15:31:37.290: INFO: Pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015536231s
- Jul 29 15:31:39.292: INFO: Pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.0175727s
- STEP: Saw pod success 07/29/23 15:31:39.292
- Jul 29 15:31:39.292: INFO: Pod "pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1" satisfied condition "Succeeded or Failed"
- Jul 29 15:31:39.300: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1 container secret-volume-test:
- STEP: delete the pod 07/29/23 15:31:39.315
- Jul 29 15:31:39.339: INFO: Waiting for pod pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1 to disappear
- Jul 29 15:31:39.344: INFO: Pod pod-secrets-fad7e091-153e-45af-9c3f-4627dc7718d1 no longer exists
- [AfterEach] [sig-storage] Secrets
+ [It] should support CSIStorageCapacities API operations [Conformance]
+ test/e2e/storage/csistoragecapacity.go:49
+ STEP: getting /apis 08/24/23 11:41:01.938
+ STEP: getting /apis/storage.k8s.io 08/24/23 11:41:01.942
+ STEP: getting /apis/storage.k8s.io/v1 08/24/23 11:41:01.944
+ STEP: creating 08/24/23 11:41:01.946
+ STEP: watching 08/24/23 11:41:01.974
+ Aug 24 11:41:01.975: INFO: starting watch
+ STEP: getting 08/24/23 11:41:01.985
+ STEP: listing in namespace 08/24/23 11:41:01.989
+ STEP: listing across namespaces 08/24/23 11:41:01.994
+ STEP: patching 08/24/23 11:41:01.999
+ STEP: updating 08/24/23 11:41:02.007
+ Aug 24 11:41:02.015: INFO: waiting for watch events with expected annotations in namespace
+ Aug 24 11:41:02.015: INFO: waiting for watch events with expected annotations across namespace
+ STEP: deleting 08/24/23 11:41:02.015
+ STEP: deleting a collection 08/24/23 11:41:02.036
+ [AfterEach] [sig-storage] CSIStorageCapacity
 test/e2e/framework/node/init/init.go:32
- Jul 29 15:31:39.344: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-storage] Secrets
+ Aug 24 11:41:02.062: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-storage] Secrets
+ [DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-storage] Secrets
+ [DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
 tear down framework | framework.go:193
- STEP: Destroying namespace "secrets-7429" for this suite. 07/29/23 15:31:39.353
+ STEP: Destroying namespace "csistoragecapacity-6038" for this suite.
08/24/23 11:41:02.074 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SS ------------------------------ -[sig-node] RuntimeClass - should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:104 -[BeforeEach] [sig-node] RuntimeClass +[sig-node] InitContainer [NodeConformance] + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + test/e2e/common/node/init_container.go:334 +[BeforeEach] [sig-node] InitContainer [NodeConformance] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:31:39.373 -Jul 29 15:31:39.373: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename runtimeclass 07/29/23 15:31:39.377 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:39.405 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:39.408 -[BeforeEach] [sig-node] RuntimeClass +STEP: Creating a kubernetes client 08/24/23 11:41:02.104 +Aug 24 11:41:02.104: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename init-container 08/24/23 11:41:02.107 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:02.18 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:02.185 +[BeforeEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:31 -[It] should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:104 -Jul 29 15:31:39.444: INFO: Waiting up to 1m20s for at least 1 pods in namespace runtimeclass-4980 to be scheduled -Jul 29 15:31:39.453: INFO: 1 pods are not scheduled: [runtimeclass-4980/test-runtimeclass-runtimeclass-4980-preconfigured-handler-6tm7h(495f311f-6b61-4b2e-af6d-1e75642d1eeb)] -[AfterEach] [sig-node] RuntimeClass +[BeforeEach] [sig-node] InitContainer [NodeConformance] + test/e2e/common/node/init_container.go:165 +[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] + test/e2e/common/node/init_container.go:334 +STEP: creating the pod 08/24/23 11:41:02.208 +Aug 24 11:41:02.209: INFO: PodSpec: initContainers in spec.initContainers +Aug 24 11:41:43.857: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-2941ac8c-c70b-4878-b01a-5ab6e1e53abc", GenerateName:"", Namespace:"init-container-5099", SelfLink:"", UID:"d4d8a34c-64fd-4b87-86d9-f40af842551f", ResourceVersion:"4183", Generation:0, CreationTimestamp:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"208999300"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"e2e.test", Operation:"Update", APIVersion:"v1", Time:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc0014c1320), Subresource:""}, v1.ManagedFieldsEntry{Manager:"kubelet", Operation:"Update", APIVersion:"v1", Time:time.Date(2023, time.August, 24, 11, 41, 43, 0, time.Local), FieldsType:"FieldsV1", 
FieldsV1:(*v1.FieldsV1)(0xc0014c1368), Subresource:"status"}}}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-api-access-4pwnk", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(0xc003aa71c0), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil), Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-4pwnk", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil), Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-4pwnk", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"registry.k8s.io/pause:3.9", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", 
Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-4pwnk", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc007152760), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"pe9deep4seen-3", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc000736460), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0071527f0)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc007152810)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc007152818), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc00715281c), PreemptionPolicy:(*v1.PreemptionPolicy)(0xc000aff790), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil), OS:(*v1.PodOS)(nil), HostUsers:(*bool)(nil), SchedulingGates:[]v1.PodSchedulingGate(nil), ResourceClaims:[]v1.PodResourceClaim(nil)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, 
v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"192.168.121.130", PodIP:"10.233.66.105", PodIPs:[]v1.PodIP{v1.PodIP{IP:"10.233.66.105"}}, StartTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc000736540)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0007365b0)}, Ready:false, RestartCount:3, Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", ImageID:"registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937", ContainerID:"cri-o://3ff443c9a48c63f15cf2e36319b63ea982667e37cedd53313b9bf936d54a4d78", Started:(*bool)(nil)}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc003aa7240), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", ImageID:"", ContainerID:"", Started:(*bool)(nil)}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc003aa7220), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"registry.k8s.io/pause:3.9", ImageID:"", ContainerID:"", Started:(*bool)(0xc007152894)}}, QOSClass:"Burstable", EphemeralContainerStatuses:[]v1.ContainerStatus(nil)}} +[AfterEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/node/init/init.go:32 -Jul 29 15:31:41.471: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] RuntimeClass +Aug 24 11:41:43.864: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] RuntimeClass +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] RuntimeClass +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] tear down framework | framework.go:193 -STEP: Destroying namespace "runtimeclass-4980" for this suite. 07/29/23 15:31:41.477 +STEP: Destroying namespace "init-container-5099" for this suite. 
08/24/23 11:41:43.882 ------------------------------ -• [2.114 seconds] -[sig-node] RuntimeClass +• [SLOW TEST] [41.794 seconds] +[sig-node] InitContainer [NodeConformance] test/e2e/common/node/framework.go:23 - should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:104 + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + test/e2e/common/node/init_container.go:334 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] RuntimeClass + [BeforeEach] [sig-node] InitContainer [NodeConformance] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:31:39.373 - Jul 29 15:31:39.373: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename runtimeclass 07/29/23 15:31:39.377 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:39.405 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:39.408 - [BeforeEach] [sig-node] RuntimeClass + STEP: Creating a kubernetes client 08/24/23 11:41:02.104 + Aug 24 11:41:02.104: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename init-container 08/24/23 11:41:02.107 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:02.18 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:02.185 + [BeforeEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:31 - [It] should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:104 - Jul 29 15:31:39.444: INFO: Waiting up to 1m20s for at least 1 pods in namespace runtimeclass-4980 to be scheduled - Jul 29 15:31:39.453: INFO: 1 pods are not scheduled: [runtimeclass-4980/test-runtimeclass-runtimeclass-4980-preconfigured-handler-6tm7h(495f311f-6b61-4b2e-af6d-1e75642d1eeb)] - [AfterEach] [sig-node] RuntimeClass + [BeforeEach] [sig-node] InitContainer [NodeConformance] + test/e2e/common/node/init_container.go:165 + [It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] + test/e2e/common/node/init_container.go:334 + STEP: creating the pod 08/24/23 11:41:02.208 + Aug 24 11:41:02.209: INFO: PodSpec: initContainers in spec.initContainers + Aug 24 11:41:43.857: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-2941ac8c-c70b-4878-b01a-5ab6e1e53abc", GenerateName:"", Namespace:"init-container-5099", SelfLink:"", UID:"d4d8a34c-64fd-4b87-86d9-f40af842551f", ResourceVersion:"4183", Generation:0, CreationTimestamp:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"208999300"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"e2e.test", Operation:"Update", APIVersion:"v1", Time:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc0014c1320), Subresource:""}, v1.ManagedFieldsEntry{Manager:"kubelet", Operation:"Update", APIVersion:"v1", Time:time.Date(2023, time.August, 24, 11, 41, 43, 0, time.Local), FieldsType:"FieldsV1", 
FieldsV1:(*v1.FieldsV1)(0xc0014c1368), Subresource:"status"}}}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-api-access-4pwnk", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(0xc003aa71c0), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil), Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-4pwnk", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil), Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-4pwnk", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"registry.k8s.io/pause:3.9", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", 
Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-4pwnk", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc007152760), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"pe9deep4seen-3", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc000736460), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0071527f0)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc007152810)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc007152818), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc00715281c), PreemptionPolicy:(*v1.PreemptionPolicy)(0xc000aff790), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil), OS:(*v1.PodOS)(nil), HostUsers:(*bool)(nil), SchedulingGates:[]v1.PodSchedulingGate(nil), ResourceClaims:[]v1.PodResourceClaim(nil)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, 
v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"192.168.121.130", PodIP:"10.233.66.105", PodIPs:[]v1.PodIP{v1.PodIP{IP:"10.233.66.105"}}, StartTime:time.Date(2023, time.August, 24, 11, 41, 2, 0, time.Local), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc000736540)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0007365b0)}, Ready:false, RestartCount:3, Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", ImageID:"registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937", ContainerID:"cri-o://3ff443c9a48c63f15cf2e36319b63ea982667e37cedd53313b9bf936d54a4d78", Started:(*bool)(nil)}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc003aa7240), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", ImageID:"", ContainerID:"", Started:(*bool)(nil)}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc003aa7220), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"registry.k8s.io/pause:3.9", ImageID:"", ContainerID:"", Started:(*bool)(0xc007152894)}}, QOSClass:"Burstable", EphemeralContainerStatuses:[]v1.ContainerStatus(nil)}} + [AfterEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/node/init/init.go:32 - Jul 29 15:31:41.471: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] RuntimeClass + Aug 24 11:41:43.864: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] tear down framework | framework.go:193 - STEP: Destroying namespace "runtimeclass-4980" for this suite. 07/29/23 15:31:41.477 + STEP: Destroying namespace "init-container-5099" for this suite. 
08/24/23 11:41:43.882
 << End Captured GinkgoWriter Output
 ------------------------------
-SSSSSSSS
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
 ------------------------------
-[sig-apps] CronJob
- should not schedule new jobs when ForbidConcurrent [Slow] [Conformance]
- test/e2e/apps/cronjob.go:124
-[BeforeEach] [sig-apps] CronJob
+[sig-storage] Projected downwardAPI
+ should provide container's cpu request [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_downwardapi.go:221
+[BeforeEach] [sig-storage] Projected downwardAPI
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 15:31:41.494
-Jul 29 15:31:41.495: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename cronjob 07/29/23 15:31:41.497
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:41.524
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:41.531
-[BeforeEach] [sig-apps] CronJob
+STEP: Creating a kubernetes client 08/24/23 11:41:43.911
+Aug 24 11:41:43.911: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename projected 08/24/23 11:41:43.916
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:43.997
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:44.001
+[BeforeEach] [sig-storage] Projected downwardAPI
 test/e2e/framework/metrics/init/init.go:31
-[It] should not schedule new jobs when ForbidConcurrent [Slow] [Conformance]
- test/e2e/apps/cronjob.go:124
-STEP: Creating a ForbidConcurrent cronjob 07/29/23 15:31:41.535
-STEP: Ensuring a job is scheduled 07/29/23 15:31:41.545
-STEP: Ensuring exactly one is scheduled 07/29/23 15:32:01.554
-STEP: Ensuring exactly one running job exists by listing jobs explicitly 07/29/23 15:32:01.56
-STEP: Ensuring no more jobs are scheduled 07/29/23 15:32:01.566
-STEP: Removing cronjob 07/29/23 15:37:01.583
-[AfterEach] [sig-apps] CronJob
+[BeforeEach] [sig-storage] Projected downwardAPI
+ test/e2e/common/storage/projected_downwardapi.go:44
+[It] should provide container's cpu request [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_downwardapi.go:221
+STEP: Creating a pod to test downward API volume plugin 08/24/23 11:41:44.005
+Aug 24 11:41:44.019: INFO: Waiting up to 5m0s for pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313" in namespace "projected-1818" to be "Succeeded or Failed"
+Aug 24 11:41:44.024: INFO: Pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313": Phase="Pending", Reason="", readiness=false. Elapsed: 4.745515ms
+Aug 24 11:41:46.037: INFO: Pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01728521s
+Aug 24 11:41:48.032: INFO: Pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012904847s
+STEP: Saw pod success 08/24/23 11:41:48.032
+Aug 24 11:41:48.033: INFO: Pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313" satisfied condition "Succeeded or Failed"
+Aug 24 11:41:48.037: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313 container client-container:
+STEP: delete the pod 08/24/23 11:41:48.062
+Aug 24 11:41:48.094: INFO: Waiting for pod downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313 to disappear
+Aug 24 11:41:48.100: INFO: Pod downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
 test/e2e/framework/node/init/init.go:32
-Jul 29 15:37:01.598: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-apps] CronJob
+Aug 24 11:41:48.100: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-apps] CronJob
+[DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-apps] CronJob
+[DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 tear down framework | framework.go:193
-STEP: Destroying namespace "cronjob-1708" for this suite. 07/29/23 15:37:01.61
+STEP: Destroying namespace "projected-1818" for this suite. 08/24/23 11:41:48.113
 ------------------------------
-• [SLOW TEST] [320.137 seconds]
-[sig-apps] CronJob
-test/e2e/apps/framework.go:23
- should not schedule new jobs when ForbidConcurrent [Slow] [Conformance]
- test/e2e/apps/cronjob.go:124
+• [4.216 seconds]
+[sig-storage] Projected downwardAPI
+test/e2e/common/storage/framework.go:23
+ should provide container's cpu request [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_downwardapi.go:221
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-apps] CronJob
+ [BeforeEach] [sig-storage] Projected downwardAPI
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 15:31:41.494
- Jul 29 15:31:41.495: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename cronjob 07/29/23 15:31:41.497
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:31:41.524
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:31:41.531
- [BeforeEach] [sig-apps] CronJob
+ STEP: Creating a kubernetes client 08/24/23 11:41:43.911
+ Aug 24 11:41:43.911: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename projected 08/24/23 11:41:43.916
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:43.997
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:44.001
+ [BeforeEach] [sig-storage] Projected downwardAPI
 test/e2e/framework/metrics/init/init.go:31
- [It] should not schedule new jobs when ForbidConcurrent [Slow] [Conformance]
- test/e2e/apps/cronjob.go:124
- STEP: Creating a ForbidConcurrent cronjob 07/29/23 15:31:41.535
- STEP: Ensuring a job is scheduled 07/29/23 15:31:41.545
- STEP: Ensuring exactly one is scheduled 07/29/23 15:32:01.554
- STEP: Ensuring exactly one running job exists by listing jobs explicitly 07/29/23 15:32:01.56
- STEP: Ensuring no more jobs are scheduled 07/29/23 15:32:01.566
- STEP: Removing cronjob 07/29/23 15:37:01.583
- [AfterEach] [sig-apps] CronJob
+ [BeforeEach] [sig-storage] Projected downwardAPI
+ test/e2e/common/storage/projected_downwardapi.go:44
+ [It] should provide container's cpu request [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_downwardapi.go:221
+ STEP: Creating a pod to test downward API volume plugin 08/24/23 11:41:44.005
+ Aug 24 11:41:44.019: INFO: Waiting up to 5m0s for pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313" in namespace "projected-1818" to be "Succeeded or Failed"
+ Aug 24 11:41:44.024: INFO: Pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313": Phase="Pending", Reason="", readiness=false. Elapsed: 4.745515ms
+ Aug 24 11:41:46.037: INFO: Pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01728521s
+ Aug 24 11:41:48.032: INFO: Pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012904847s
+ STEP: Saw pod success 08/24/23 11:41:48.032
+ Aug 24 11:41:48.033: INFO: Pod "downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313" satisfied condition "Succeeded or Failed"
+ Aug 24 11:41:48.037: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313 container client-container:
+ STEP: delete the pod 08/24/23 11:41:48.062
+ Aug 24 11:41:48.094: INFO: Waiting for pod downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313 to disappear
+ Aug 24 11:41:48.100: INFO: Pod downwardapi-volume-a8e582bf-bf8e-4ff5-8cd6-e635d4fb3313 no longer exists
+ [AfterEach] [sig-storage] Projected downwardAPI
 test/e2e/framework/node/init/init.go:32
- Jul 29 15:37:01.598: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-apps] CronJob
+ Aug 24 11:41:48.100: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-apps] CronJob
+ [DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-apps] CronJob
+ [DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 tear down framework | framework.go:193
- STEP: Destroying namespace "cronjob-1708" for this suite. 07/29/23 15:37:01.61
+ STEP: Destroying namespace "projected-1818" for this suite. 08/24/23 11:41:48.113
 << End Captured GinkgoWriter Output
 ------------------------------
 SSSS
 ------------------------------
 [sig-apps] ReplicaSet
- should validate Replicaset Status endpoints [Conformance]
- test/e2e/apps/replica_set.go:176
+ should adopt matching pods on creation and release no longer matching pods [Conformance]
+ test/e2e/apps/replica_set.go:131
 [BeforeEach] [sig-apps] ReplicaSet
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 15:37:01.639
-Jul 29 15:37:01.639: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename replicaset 07/29/23 15:37:01.649
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:01.699
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:01.708
+STEP: Creating a kubernetes client 08/24/23 11:41:48.132
+Aug 24 11:41:48.133: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename replicaset 08/24/23 11:41:48.134
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:48.161
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:48.165
 [BeforeEach] [sig-apps] ReplicaSet
 test/e2e/framework/metrics/init/init.go:31
-[It] should validate Replicaset Status endpoints [Conformance]
- test/e2e/apps/replica_set.go:176
-STEP: Create a Replicaset 07/29/23 15:37:01.736
-STEP: Verify that the required pods have come up. 07/29/23 15:37:01.794
-Jul 29 15:37:01.814: INFO: Pod name sample-pod: Found 1 pods out of 1
-STEP: ensuring each pod is running 07/29/23 15:37:01.814
-Jul 29 15:37:01.815: INFO: Waiting up to 5m0s for pod "test-rs-5jwlj" in namespace "replicaset-7413" to be "running"
-Jul 29 15:37:01.854: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 38.960771ms
-Jul 29 15:37:03.862: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 2.046530177s
-Jul 29 15:37:05.866: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 4.051040191s
-Jul 29 15:37:07.864: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 6.048569293s
-Jul 29 15:37:09.864: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 8.048752073s
-Jul 29 15:37:11.877: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 10.062175782s
-Jul 29 15:37:13.864: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 12.049376804s
-Jul 29 15:37:15.869: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 14.053986383s
-Jul 29 15:37:17.862: INFO: Pod "test-rs-5jwlj": Phase="Running", Reason="", readiness=true. Elapsed: 16.046875156s
-Jul 29 15:37:17.862: INFO: Pod "test-rs-5jwlj" satisfied condition "running"
-STEP: Getting /status 07/29/23 15:37:17.862
-Jul 29 15:37:17.873: INFO: Replicaset test-rs has Conditions: []
-STEP: updating the Replicaset Status 07/29/23 15:37:17.873
-Jul 29 15:37:17.890: INFO: updatedStatus.Conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}}
-STEP: watching for the ReplicaSet status to be updated 07/29/23 15:37:17.89
-Jul 29 15:37:17.895: INFO: Observed &ReplicaSet event: ADDED
-Jul 29 15:37:17.896: INFO: Observed &ReplicaSet event: MODIFIED
-Jul 29 15:37:17.896: INFO: Observed &ReplicaSet event: MODIFIED
-Jul 29 15:37:17.896: INFO: Observed &ReplicaSet event: MODIFIED
-Jul 29 15:37:17.896: INFO: Found replicaset test-rs in namespace replicaset-7413 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}]
-Jul 29 15:37:17.897: INFO: Replicaset test-rs has an updated status
-STEP: patching the Replicaset Status 07/29/23 15:37:17.897
-Jul 29 15:37:17.897: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}
-Jul 29 15:37:17.917: INFO: Patched status conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}}
-STEP: watching for the Replicaset status to be patched 07/29/23 15:37:17.917
-Jul 29 15:37:17.922: INFO: Observed &ReplicaSet event: ADDED
-Jul 29 15:37:17.922: INFO: Observed &ReplicaSet event: MODIFIED
-Jul 29 15:37:17.922: INFO: Observed &ReplicaSet event: MODIFIED
-Jul 29 15:37:17.923: INFO: Observed &ReplicaSet event: MODIFIED
-Jul 29 15:37:17.923: INFO: Observed replicaset test-rs in namespace replicaset-7413 with annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}
-Jul 29 15:37:17.923: INFO: Observed &ReplicaSet event: MODIFIED
-Jul 29 15:37:17.923: INFO: Found replicaset test-rs in namespace replicaset-7413 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC }
-Jul 29 15:37:17.923: INFO: Replicaset test-rs has a patched status
+[It] should adopt matching pods on creation and release no longer matching pods [Conformance]
+ test/e2e/apps/replica_set.go:131
+STEP: Given a Pod with a 'name' label pod-adoption-release is created 08/24/23 11:41:48.167
+Aug 24 11:41:48.183: INFO: Waiting up to 5m0s for pod "pod-adoption-release" in namespace "replicaset-6082" to be "running and ready"
+Aug 24 11:41:48.192: INFO: Pod "pod-adoption-release": Phase="Pending", Reason="", readiness=false. Elapsed: 8.992884ms
+Aug 24 11:41:48.192: INFO: The phase of Pod pod-adoption-release is Pending, waiting for it to be Running (with Ready = true)
+Aug 24 11:41:50.211: INFO: Pod "pod-adoption-release": Phase="Running", Reason="", readiness=true. Elapsed: 2.02836089s
+Aug 24 11:41:50.211: INFO: The phase of Pod pod-adoption-release is Running (Ready = true)
+Aug 24 11:41:50.211: INFO: Pod "pod-adoption-release" satisfied condition "running and ready"
+STEP: When a replicaset with a matching selector is created 08/24/23 11:41:50.237
+STEP: Then the orphan pod is adopted 08/24/23 11:41:50.248
+STEP: When the matched label of one of its pods change 08/24/23 11:41:51.264
+Aug 24 11:41:51.270: INFO: Pod name pod-adoption-release: Found 1 pods out of 1
+STEP: Then the pod is released 08/24/23 11:41:51.287
 [AfterEach] [sig-apps] ReplicaSet
 test/e2e/framework/node/init/init.go:32
-Jul 29 15:37:17.924: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+Aug 24 11:41:52.312: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
 [DeferCleanup (Each)] [sig-apps] ReplicaSet
 test/e2e/framework/metrics/init/init.go:33
 [DeferCleanup (Each)] [sig-apps] ReplicaSet
 dump namespaces | framework.go:196
 [DeferCleanup (Each)] [sig-apps] ReplicaSet
 tear down framework | framework.go:193
-STEP: Destroying namespace "replicaset-7413" for this suite. 07/29/23 15:37:17.933
+STEP: Destroying namespace "replicaset-6082" for this suite. 08/24/23 11:41:52.32
 ------------------------------
-• [SLOW TEST] [16.309 seconds]
+• [4.200 seconds]
 [sig-apps] ReplicaSet
 test/e2e/apps/framework.go:23
- should validate Replicaset Status endpoints [Conformance]
- test/e2e/apps/replica_set.go:176
+ should adopt matching pods on creation and release no longer matching pods [Conformance]
+ test/e2e/apps/replica_set.go:131
 Begin Captured GinkgoWriter Output >>
 [BeforeEach] [sig-apps] ReplicaSet
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 15:37:01.639
- Jul 29 15:37:01.639: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename replicaset 07/29/23 15:37:01.649
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:01.699
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:01.708
+ STEP: Creating a kubernetes client 08/24/23 11:41:48.132
+ Aug 24 11:41:48.133: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename replicaset 08/24/23 11:41:48.134
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:48.161
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:48.165
 [BeforeEach] [sig-apps] ReplicaSet
 test/e2e/framework/metrics/init/init.go:31
- [It] should validate Replicaset Status endpoints [Conformance]
- test/e2e/apps/replica_set.go:176
- STEP: Create a Replicaset 07/29/23 15:37:01.736
- STEP: Verify that the required pods have come up. 07/29/23 15:37:01.794
- Jul 29 15:37:01.814: INFO: Pod name sample-pod: Found 1 pods out of 1
- STEP: ensuring each pod is running 07/29/23 15:37:01.814
- Jul 29 15:37:01.815: INFO: Waiting up to 5m0s for pod "test-rs-5jwlj" in namespace "replicaset-7413" to be "running"
- Jul 29 15:37:01.854: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 38.960771ms
- Jul 29 15:37:03.862: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 2.046530177s
- Jul 29 15:37:05.866: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 4.051040191s
- Jul 29 15:37:07.864: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 6.048569293s
- Jul 29 15:37:09.864: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 8.048752073s
- Jul 29 15:37:11.877: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 10.062175782s
- Jul 29 15:37:13.864: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 12.049376804s
- Jul 29 15:37:15.869: INFO: Pod "test-rs-5jwlj": Phase="Pending", Reason="", readiness=false. Elapsed: 14.053986383s
- Jul 29 15:37:17.862: INFO: Pod "test-rs-5jwlj": Phase="Running", Reason="", readiness=true. Elapsed: 16.046875156s
- Jul 29 15:37:17.862: INFO: Pod "test-rs-5jwlj" satisfied condition "running"
- STEP: Getting /status 07/29/23 15:37:17.862
- Jul 29 15:37:17.873: INFO: Replicaset test-rs has Conditions: []
- STEP: updating the Replicaset Status 07/29/23 15:37:17.873
- Jul 29 15:37:17.890: INFO: updatedStatus.Conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}}
- STEP: watching for the ReplicaSet status to be updated 07/29/23 15:37:17.89
- Jul 29 15:37:17.895: INFO: Observed &ReplicaSet event: ADDED
- Jul 29 15:37:17.896: INFO: Observed &ReplicaSet event: MODIFIED
- Jul 29 15:37:17.896: INFO: Observed &ReplicaSet event: MODIFIED
- Jul 29 15:37:17.896: INFO: Observed &ReplicaSet event: MODIFIED
- Jul 29 15:37:17.896: INFO: Found replicaset test-rs in namespace replicaset-7413 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}]
- Jul 29 15:37:17.897: INFO: Replicaset test-rs has an updated status
- STEP: patching the Replicaset Status 07/29/23 15:37:17.897
- Jul 29 15:37:17.897: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}
- Jul 29 15:37:17.917: INFO: Patched status conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}}
- STEP: watching for the Replicaset status to be patched 07/29/23 15:37:17.917
- Jul 29 15:37:17.922: INFO: Observed &ReplicaSet event: ADDED
- Jul 29 15:37:17.922: INFO: Observed &ReplicaSet event: MODIFIED
- Jul 29 15:37:17.922: INFO: Observed &ReplicaSet event: MODIFIED
- Jul 29 15:37:17.923: INFO: Observed &ReplicaSet event: MODIFIED
- Jul 29 15:37:17.923: INFO: Observed replicaset test-rs in namespace replicaset-7413 with annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}
- Jul 29 15:37:17.923: INFO: Observed &ReplicaSet event: MODIFIED
- Jul 29 15:37:17.923: INFO: Found replicaset test-rs in namespace replicaset-7413 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC }
- Jul 29 15:37:17.923: INFO: Replicaset test-rs has a patched status
+ [It] should adopt matching pods on creation and release no longer matching pods [Conformance]
+ test/e2e/apps/replica_set.go:131
+ STEP: Given a Pod with a 'name' label pod-adoption-release is created 08/24/23 11:41:48.167
+ Aug 24 11:41:48.183: INFO: Waiting up to 5m0s for pod "pod-adoption-release" in namespace "replicaset-6082" to be "running and ready"
+ Aug 24 11:41:48.192: INFO: Pod "pod-adoption-release": Phase="Pending", Reason="", readiness=false. Elapsed: 8.992884ms
+ Aug 24 11:41:48.192: INFO: The phase of Pod pod-adoption-release is Pending, waiting for it to be Running (with Ready = true)
+ Aug 24 11:41:50.211: INFO: Pod "pod-adoption-release": Phase="Running", Reason="", readiness=true. Elapsed: 2.02836089s
+ Aug 24 11:41:50.211: INFO: The phase of Pod pod-adoption-release is Running (Ready = true)
+ Aug 24 11:41:50.211: INFO: Pod "pod-adoption-release" satisfied condition "running and ready"
+ STEP: When a replicaset with a matching selector is created 08/24/23 11:41:50.237
+ STEP: Then the orphan pod is adopted 08/24/23 11:41:50.248
+ STEP: When the matched label of one of its pods change 08/24/23 11:41:51.264
+ Aug 24 11:41:51.270: INFO: Pod name pod-adoption-release: Found 1 pods out of 1
+ STEP: Then the pod is released 08/24/23 11:41:51.287
 [AfterEach] [sig-apps] ReplicaSet
 test/e2e/framework/node/init/init.go:32
- Jul 29 15:37:17.924: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ Aug 24 11:41:52.312: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
 [DeferCleanup (Each)] [sig-apps] ReplicaSet
 test/e2e/framework/metrics/init/init.go:33
 [DeferCleanup (Each)] [sig-apps] ReplicaSet
 dump namespaces | framework.go:196
 [DeferCleanup (Each)] [sig-apps] ReplicaSet
 tear down framework | framework.go:193
- STEP: Destroying namespace "replicaset-7413" for this suite. 07/29/23 15:37:17.933
+ STEP: Destroying namespace "replicaset-6082" for this suite. 08/24/23 11:41:52.32
 << End Captured GinkgoWriter Output
 ------------------------------
-SSSSSSSSSSSSSSSS
+SSS
 ------------------------------
 [sig-storage] Secrets
- should be immutable if `immutable` field is set [Conformance]
- test/e2e/common/storage/secrets_volume.go:386
+ should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/secrets_volume.go:89
 [BeforeEach] [sig-storage] Secrets
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 15:37:17.952
-Jul 29 15:37:17.952: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename secrets 07/29/23 15:37:17.957
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:17.985
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:17.993
+STEP: Creating a kubernetes client 08/24/23 11:41:52.342
+Aug 24 11:41:52.343: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename secrets 08/24/23 11:41:52.347
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:52.376
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:52.382
 [BeforeEach] [sig-storage] Secrets
 test/e2e/framework/metrics/init/init.go:31
-[It] should be immutable if `immutable` field is set [Conformance]
- test/e2e/common/storage/secrets_volume.go:386
+[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/secrets_volume.go:89
+STEP: Creating secret with name secret-test-map-d895e7da-b0de-4775-be72-0c1b9455dfe1 08/24/23 11:41:52.386
+STEP: Creating a pod to test consume secrets 08/24/23 11:41:52.397
+Aug 24 11:41:52.414: INFO: Waiting up to 5m0s for pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404" in namespace "secrets-9933" to be "Succeeded or Failed"
+Aug 24 11:41:52.425: INFO: Pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404": Phase="Pending", Reason="", readiness=false. Elapsed: 10.52098ms
+Aug 24 11:41:54.435: INFO: Pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020555948s
+Aug 24 11:41:56.437: INFO: Pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022931478s
+STEP: Saw pod success 08/24/23 11:41:56.438
+Aug 24 11:41:56.438: INFO: Pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404" satisfied condition "Succeeded or Failed"
+Aug 24 11:41:56.444: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404 container secret-volume-test:
+STEP: delete the pod 08/24/23 11:41:56.456
+Aug 24 11:41:56.474: INFO: Waiting for pod pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404 to disappear
+Aug 24 11:41:56.479: INFO: Pod pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404 no longer exists
 [AfterEach] [sig-storage] Secrets
 test/e2e/framework/node/init/init.go:32
-Jul 29 15:37:18.094: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+Aug 24 11:41:56.480: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
 [DeferCleanup (Each)] [sig-storage] Secrets
 test/e2e/framework/metrics/init/init.go:33
 [DeferCleanup (Each)] [sig-storage] Secrets
 dump namespaces | framework.go:196
 [DeferCleanup (Each)] [sig-storage] Secrets
 tear down framework | framework.go:193
-STEP: Destroying namespace "secrets-9664" for this suite. 07/29/23 15:37:18.103
+STEP: Destroying namespace "secrets-9933" for this suite. 08/24/23 11:41:56.488
 ------------------------------
-• [0.174 seconds]
+• [4.157 seconds]
 [sig-storage] Secrets
 test/e2e/common/storage/framework.go:23
- should be immutable if `immutable` field is set [Conformance]
- test/e2e/common/storage/secrets_volume.go:386
+ should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/secrets_volume.go:89
 Begin Captured GinkgoWriter Output >>
 [BeforeEach] [sig-storage] Secrets
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 15:37:17.952
- Jul 29 15:37:17.952: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename secrets 07/29/23 15:37:17.957
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:17.985
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:17.993
+ STEP: Creating a kubernetes client 08/24/23 11:41:52.342
+ Aug 24 11:41:52.343: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename secrets 08/24/23 11:41:52.347
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:52.376
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:52.382
 [BeforeEach] [sig-storage] Secrets
 test/e2e/framework/metrics/init/init.go:31
- [It] should be immutable if `immutable` field is set [Conformance]
- test/e2e/common/storage/secrets_volume.go:386
+ [It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/secrets_volume.go:89
+ STEP: Creating secret with name secret-test-map-d895e7da-b0de-4775-be72-0c1b9455dfe1 08/24/23 11:41:52.386
+ STEP: Creating a pod to test consume secrets 08/24/23 11:41:52.397
+ Aug 24 11:41:52.414: INFO: Waiting up to 5m0s for pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404" in namespace "secrets-9933" to be "Succeeded or Failed"
+ Aug 24 11:41:52.425: INFO: Pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404": Phase="Pending", Reason="", readiness=false. Elapsed: 10.52098ms
+ Aug 24 11:41:54.435: INFO: Pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020555948s
+ Aug 24 11:41:56.437: INFO: Pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022931478s
+ STEP: Saw pod success 08/24/23 11:41:56.438
+ Aug 24 11:41:56.438: INFO: Pod "pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404" satisfied condition "Succeeded or Failed"
+ Aug 24 11:41:56.444: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404 container secret-volume-test:
+ STEP: delete the pod 08/24/23 11:41:56.456
+ Aug 24 11:41:56.474: INFO: Waiting for pod pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404 to disappear
+ Aug 24 11:41:56.479: INFO: Pod pod-secrets-b51e94bd-0928-4a49-a5e0-35a0b9ac0404 no longer exists
 [AfterEach] [sig-storage] Secrets
 test/e2e/framework/node/init/init.go:32
- Jul 29 15:37:18.094: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ Aug 24 11:41:56.480: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
 [DeferCleanup (Each)] [sig-storage] Secrets
 test/e2e/framework/metrics/init/init.go:33
 [DeferCleanup (Each)] [sig-storage] Secrets
 dump namespaces | framework.go:196
 [DeferCleanup (Each)] [sig-storage] Secrets
 tear down framework | framework.go:193
- STEP: Destroying namespace "secrets-9664" for this suite. 07/29/23 15:37:18.103
+ STEP: Destroying namespace "secrets-9933" for this suite. 08/24/23 11:41:56.488
 << End Captured GinkgoWriter Output
 ------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+SSS
 ------------------------------
-[sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces
- should list and delete a collection of PodDisruptionBudgets [Conformance]
- test/e2e/apps/disruption.go:87
-[BeforeEach] [sig-apps] DisruptionController
- set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 15:37:18.129
-Jul 29 15:37:18.129: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename disruption 07/29/23 15:37:18.134
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:18.158
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:18.162
-[BeforeEach] [sig-apps] DisruptionController
- test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-apps] DisruptionController
- test/e2e/apps/disruption.go:72
-[BeforeEach] Listing PodDisruptionBudgets for all namespaces
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 15:37:18.167
-Jul 29 15:37:18.167: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename disruption-2 07/29/23 15:37:18.17
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:18.207
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:18.213
-[BeforeEach] Listing PodDisruptionBudgets for all namespaces
+[sig-apps] Daemon set [Serial]
+ should rollback without unnecessary restarts [Conformance]
+ test/e2e/apps/daemon_set.go:443
+[BeforeEach] [sig-apps] Daemon set [Serial]
 set up framework | framework.go:178
+STEP: Creating a kubernetes client 08/24/23 
11:41:56.5 +Aug 24 11:41:56.500: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename daemonsets 08/24/23 11:41:56.501 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:56.537 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:56.544 +[BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should list and delete a collection of PodDisruptionBudgets [Conformance] - test/e2e/apps/disruption.go:87 -STEP: Waiting for the pdb to be processed 07/29/23 15:37:18.233 -STEP: Waiting for the pdb to be processed 07/29/23 15:37:20.258 -STEP: Waiting for the pdb to be processed 07/29/23 15:37:20.276 -STEP: listing a collection of PDBs across all namespaces 07/29/23 15:37:22.29 -STEP: listing a collection of PDBs in namespace disruption-7870 07/29/23 15:37:22.296 -STEP: deleting a collection of PDBs 07/29/23 15:37:22.305 -STEP: Waiting for the PDB collection to be deleted 07/29/23 15:37:22.334 -[AfterEach] Listing PodDisruptionBudgets for all namespaces - test/e2e/framework/node/init/init.go:32 -Jul 29 15:37:22.340: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-apps] DisruptionController +[BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 +[It] should rollback without unnecessary restarts [Conformance] + test/e2e/apps/daemon_set.go:443 +Aug 24 11:41:56.601: INFO: Create a RollingUpdate DaemonSet +Aug 24 11:41:56.621: INFO: Check that daemon pods launch on every node of the cluster +Aug 24 11:41:56.637: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 11:41:56.637: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:41:57.664: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 11:41:57.664: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:41:58.675: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 +Aug 24 11:41:58.675: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 11:41:59.652: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 +Aug 24 11:41:59.652: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set +Aug 24 11:41:59.652: INFO: Update the DaemonSet to trigger a rollout +Aug 24 11:41:59.673: INFO: Updating DaemonSet daemon-set +Aug 24 11:42:02.727: INFO: Roll back the DaemonSet before rollout is complete +Aug 24 11:42:02.745: INFO: Updating DaemonSet daemon-set +Aug 24 11:42:02.745: INFO: Make sure DaemonSet rollback is complete +Aug 24 11:42:02.760: INFO: Wrong image for pod: daemon-set-gznn7. Expected: registry.k8s.io/e2e-test-images/httpd:2.4.38-4, got: foo:non-existent. 
+Aug 24 11:42:02.760: INFO: Pod daemon-set-gznn7 is not available
+Aug 24 11:42:08.775: INFO: Pod daemon-set-zbhft is not available
+[AfterEach] [sig-apps] Daemon set [Serial]
+ test/e2e/apps/daemon_set.go:122
+STEP: Deleting DaemonSet "daemon-set" 08/24/23 11:42:08.794
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-790, will wait for the garbage collector to delete the pods 08/24/23 11:42:08.794
+Aug 24 11:42:08.860: INFO: Deleting DaemonSet.extensions daemon-set took: 10.458247ms
+Aug 24 11:42:08.961: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.903484ms
+Aug 24 11:42:12.367: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+Aug 24 11:42:12.367: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set
+Aug 24 11:42:12.374: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"4480"},"items":null}
+
+Aug 24 11:42:12.380: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"4480"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
 test/e2e/framework/node/init/init.go:32
-Jul 29 15:37:22.350: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces
- test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces
- dump namespaces | framework.go:196
-[DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces
- tear down framework | framework.go:193
-STEP: Destroying namespace "disruption-2-3221" for this suite. 07/29/23 15:37:22.361
-[DeferCleanup (Each)] [sig-apps] DisruptionController
+Aug 24 11:42:12.407: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-apps] DisruptionController
+[DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-apps] DisruptionController
+[DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
 tear down framework | framework.go:193
-STEP: Destroying namespace "disruption-7870" for this suite. 07/29/23 15:37:22.379
+STEP: Destroying namespace "daemonsets-790" for this suite. 08/24/23 11:42:12.415
------------------------------
-• [4.270 seconds]
-[sig-apps] DisruptionController
+• [SLOW TEST] [15.930 seconds]
+[sig-apps] Daemon set [Serial]
 test/e2e/apps/framework.go:23
-  Listing PodDisruptionBudgets for all namespaces
-  test/e2e/apps/disruption.go:78
-  should list and delete a collection of PodDisruptionBudgets [Conformance]
-  test/e2e/apps/disruption.go:87
+  should rollback without unnecessary restarts [Conformance]
+  test/e2e/apps/daemon_set.go:443

  Begin Captured GinkgoWriter Output >>
-  [BeforeEach] [sig-apps] DisruptionController
-    set up framework | framework.go:178
-  STEP: Creating a kubernetes client 07/29/23 15:37:18.129
-  Jul 29 15:37:18.129: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-  STEP: Building a namespace api object, basename disruption 07/29/23 15:37:18.134
-  STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:18.158
-  STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:18.162
-  [BeforeEach] [sig-apps] DisruptionController
-    test/e2e/framework/metrics/init/init.go:31
-  [BeforeEach] [sig-apps] DisruptionController
-    test/e2e/apps/disruption.go:72
-  [BeforeEach] Listing PodDisruptionBudgets for all namespaces
+  [BeforeEach] [sig-apps] Daemon set [Serial]
     set up framework | framework.go:178
-  STEP: Creating a kubernetes client 07/29/23 15:37:18.167
-  Jul 29 15:37:18.167: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-  STEP: Building a namespace api object, basename disruption-2 07/29/23 15:37:18.17
-  STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:18.207
-  STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:18.213
-  [BeforeEach] Listing PodDisruptionBudgets for all namespaces
+  STEP: Creating a kubernetes client 08/24/23 11:41:56.5
+  Aug 24 11:41:56.500: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+  STEP: Building a namespace api object, basename daemonsets 08/24/23 11:41:56.501
+  STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:41:56.537
+  STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:41:56.544
+  [BeforeEach] [sig-apps] Daemon set [Serial]
     test/e2e/framework/metrics/init/init.go:31
-  [It] should list and delete a collection of PodDisruptionBudgets [Conformance]
-    test/e2e/apps/disruption.go:87
-  STEP: Waiting for the pdb to be processed 07/29/23 15:37:18.233
-  STEP: Waiting for the pdb to be processed 07/29/23 15:37:20.258
-  STEP: Waiting for the pdb to be processed 07/29/23 15:37:20.276
-  STEP: listing a collection of PDBs across all namespaces 07/29/23 15:37:22.29
-  STEP: listing a collection of PDBs in namespace disruption-7870 07/29/23 15:37:22.296
-  STEP: deleting a collection of PDBs 07/29/23 15:37:22.305
-  STEP: Waiting for the PDB collection to be deleted 07/29/23 15:37:22.334
-  [AfterEach] Listing PodDisruptionBudgets for all namespaces
-    test/e2e/framework/node/init/init.go:32
-  Jul 29 15:37:22.340: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-  [AfterEach] [sig-apps] DisruptionController
-    test/e2e/framework/node/init/init.go:32
-  Jul 29 15:37:22.350: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-  [DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces
-    test/e2e/framework/metrics/init/init.go:33
-  [DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces
-    dump namespaces | framework.go:196
-  [DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces
-    tear down framework | framework.go:193
-  STEP: Destroying namespace "disruption-2-3221" for this suite. 07/29/23 15:37:22.361
-  [DeferCleanup (Each)] [sig-apps] DisruptionController
-    test/e2e/framework/metrics/init/init.go:33
-  [DeferCleanup (Each)] [sig-apps] DisruptionController
-    dump namespaces | framework.go:196
-  [DeferCleanup (Each)] [sig-apps] DisruptionController
-    tear down framework | framework.go:193
-  STEP: Destroying namespace "disruption-7870" for this suite. 07/29/23 15:37:22.379
-  << End Captured GinkgoWriter Output
------------------------------
-SSS
------------------------------
-[sig-node] Probing container
-  should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
-  test/e2e/common/node/container_probe.go:152
-[BeforeEach] [sig-node] Probing container
-  set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 15:37:22.401
-Jul 29 15:37:22.401: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename container-probe 07/29/23 15:37:22.403
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:22.434
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:22.44
-[BeforeEach] [sig-node] Probing container
-  test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-node] Probing container
-  test/e2e/common/node/container_probe.go:63
-[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
-  test/e2e/common/node/container_probe.go:152
-STEP: Creating pod busybox-d7222514-73a5-4cbf-94a1-22214ea7b033 in namespace container-probe-7486 07/29/23 15:37:22.446
-Jul 29 15:37:22.471: INFO: Waiting up to 5m0s for pod "busybox-d7222514-73a5-4cbf-94a1-22214ea7b033" in namespace "container-probe-7486" to be "not pending"
-Jul 29 15:37:22.484: INFO: Pod "busybox-d7222514-73a5-4cbf-94a1-22214ea7b033": Phase="Pending", Reason="", readiness=false. Elapsed: 13.282449ms
-Jul 29 15:37:24.492: INFO: Pod "busybox-d7222514-73a5-4cbf-94a1-22214ea7b033": Phase="Running", Reason="", readiness=true. Elapsed: 2.020369533s
-Jul 29 15:37:24.492: INFO: Pod "busybox-d7222514-73a5-4cbf-94a1-22214ea7b033" satisfied condition "not pending"
-Jul 29 15:37:24.492: INFO: Started pod busybox-d7222514-73a5-4cbf-94a1-22214ea7b033 in namespace container-probe-7486
-STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 15:37:24.492
-Jul 29 15:37:24.496: INFO: Initial restart count of pod busybox-d7222514-73a5-4cbf-94a1-22214ea7b033 is 0
-STEP: deleting the pod 07/29/23 15:41:25.775
-[AfterEach] [sig-node] Probing container
-  test/e2e/framework/node/init/init.go:32
-Jul 29 15:41:25.802: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-node] Probing container
-  test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-node] Probing container
-  dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-node] Probing container
-  tear down framework | framework.go:193
-STEP: Destroying namespace "container-probe-7486" for this suite. 07/29/23 15:41:25.84
------------------------------
-• [SLOW TEST] [243.451 seconds]
-[sig-node] Probing container
-test/e2e/common/node/framework.go:23
-  should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
-  test/e2e/common/node/container_probe.go:152
+  [BeforeEach] [sig-apps] Daemon set [Serial]
+    test/e2e/apps/daemon_set.go:157
+  [It] should rollback without unnecessary restarts [Conformance]
+    test/e2e/apps/daemon_set.go:443
+  Aug 24 11:41:56.601: INFO: Create a RollingUpdate DaemonSet
+  Aug 24 11:41:56.621: INFO: Check that daemon pods launch on every node of the cluster
+  Aug 24 11:41:56.637: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+  Aug 24 11:41:56.637: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1
+  Aug 24 11:41:57.664: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+  Aug 24 11:41:57.664: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1
+  Aug 24 11:41:58.675: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+  Aug 24 11:41:58.675: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1
+  Aug 24 11:41:59.652: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
+  Aug 24 11:41:59.652: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
+  Aug 24 11:41:59.652: INFO: Update the DaemonSet to trigger a rollout
+  Aug 24 11:41:59.673: INFO: Updating DaemonSet daemon-set
+  Aug 24 11:42:02.727: INFO: Roll back the DaemonSet before rollout is complete
+  Aug 24 11:42:02.745: INFO: Updating DaemonSet daemon-set
+  Aug 24 11:42:02.745: INFO: Make sure DaemonSet rollback is complete
+  Aug 24 11:42:02.760: INFO: Wrong image for pod: daemon-set-gznn7. Expected: registry.k8s.io/e2e-test-images/httpd:2.4.38-4, got: foo:non-existent.
+ Aug 24 11:42:02.760: INFO: Pod daemon-set-gznn7 is not available + Aug 24 11:42:08.775: INFO: Pod daemon-set-zbhft is not available + [AfterEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:122 + STEP: Deleting DaemonSet "daemon-set" 08/24/23 11:42:08.794 + STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-790, will wait for the garbage collector to delete the pods 08/24/23 11:42:08.794 + Aug 24 11:42:08.860: INFO: Deleting DaemonSet.extensions daemon-set took: 10.458247ms + Aug 24 11:42:08.961: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.903484ms + Aug 24 11:42:12.367: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 11:42:12.367: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set + Aug 24 11:42:12.374: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"4480"},"items":null} - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Probing container - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:37:22.401 - Jul 29 15:37:22.401: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-probe 07/29/23 15:37:22.403 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:37:22.434 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:37:22.44 - [BeforeEach] [sig-node] Probing container - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 - [It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:152 - STEP: Creating pod busybox-d7222514-73a5-4cbf-94a1-22214ea7b033 in namespace container-probe-7486 07/29/23 15:37:22.446 - Jul 29 15:37:22.471: INFO: Waiting up to 5m0s for pod "busybox-d7222514-73a5-4cbf-94a1-22214ea7b033" in namespace "container-probe-7486" to be "not pending" - Jul 29 15:37:22.484: INFO: Pod "busybox-d7222514-73a5-4cbf-94a1-22214ea7b033": Phase="Pending", Reason="", readiness=false. Elapsed: 13.282449ms - Jul 29 15:37:24.492: INFO: Pod "busybox-d7222514-73a5-4cbf-94a1-22214ea7b033": Phase="Running", Reason="", readiness=true. Elapsed: 2.020369533s - Jul 29 15:37:24.492: INFO: Pod "busybox-d7222514-73a5-4cbf-94a1-22214ea7b033" satisfied condition "not pending" - Jul 29 15:37:24.492: INFO: Started pod busybox-d7222514-73a5-4cbf-94a1-22214ea7b033 in namespace container-probe-7486 - STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 15:37:24.492 - Jul 29 15:37:24.496: INFO: Initial restart count of pod busybox-d7222514-73a5-4cbf-94a1-22214ea7b033 is 0 - STEP: deleting the pod 07/29/23 15:41:25.775 - [AfterEach] [sig-node] Probing container - test/e2e/framework/node/init/init.go:32 - Jul 29 15:41:25.802: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Probing container - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Probing container - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Probing container - tear down framework | framework.go:193 - STEP: Destroying namespace "container-probe-7486" for this suite. 
07/29/23 15:41:25.84 - << End Captured GinkgoWriter Output ------------------------------- -[sig-auth] ServiceAccounts - should allow opting out of API token automount [Conformance] - test/e2e/auth/service_accounts.go:161 -[BeforeEach] [sig-auth] ServiceAccounts - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:41:25.853 -Jul 29 15:41:25.863: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename svcaccounts 07/29/23 15:41:25.878 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:41:25.903 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:41:25.912 -[BeforeEach] [sig-auth] ServiceAccounts - test/e2e/framework/metrics/init/init.go:31 -[It] should allow opting out of API token automount [Conformance] - test/e2e/auth/service_accounts.go:161 -Jul 29 15:41:25.950: INFO: created pod pod-service-account-defaultsa -Jul 29 15:41:25.950: INFO: pod pod-service-account-defaultsa service account token volume mount: true -Jul 29 15:41:25.979: INFO: created pod pod-service-account-mountsa -Jul 29 15:41:25.979: INFO: pod pod-service-account-mountsa service account token volume mount: true -Jul 29 15:41:25.994: INFO: created pod pod-service-account-nomountsa -Jul 29 15:41:25.995: INFO: pod pod-service-account-nomountsa service account token volume mount: false -Jul 29 15:41:26.019: INFO: created pod pod-service-account-defaultsa-mountspec -Jul 29 15:41:26.019: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true -Jul 29 15:41:26.041: INFO: created pod pod-service-account-mountsa-mountspec -Jul 29 15:41:26.041: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true -Jul 29 15:41:26.061: INFO: created pod pod-service-account-nomountsa-mountspec -Jul 29 15:41:26.062: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true -Jul 29 15:41:26.071: INFO: created pod pod-service-account-defaultsa-nomountspec -Jul 29 15:41:26.072: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false -Jul 29 15:41:26.096: INFO: created pod pod-service-account-mountsa-nomountspec -Jul 29 15:41:26.096: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false -Jul 29 15:41:26.123: INFO: created pod pod-service-account-nomountsa-nomountspec -Jul 29 15:41:26.123: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false -[AfterEach] [sig-auth] ServiceAccounts - test/e2e/framework/node/init/init.go:32 -Jul 29 15:41:26.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-auth] ServiceAccounts - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts - tear down framework | framework.go:193 -STEP: Destroying namespace "svcaccounts-5768" for this suite. 
07/29/23 15:41:26.151 ------------------------------- -• [0.336 seconds] -[sig-auth] ServiceAccounts -test/e2e/auth/framework.go:23 - should allow opting out of API token automount [Conformance] - test/e2e/auth/service_accounts.go:161 + Aug 24 11:42:12.380: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"4480"},"items":null} - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-auth] ServiceAccounts - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:41:25.853 - Jul 29 15:41:25.863: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename svcaccounts 07/29/23 15:41:25.878 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:41:25.903 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:41:25.912 - [BeforeEach] [sig-auth] ServiceAccounts - test/e2e/framework/metrics/init/init.go:31 - [It] should allow opting out of API token automount [Conformance] - test/e2e/auth/service_accounts.go:161 - Jul 29 15:41:25.950: INFO: created pod pod-service-account-defaultsa - Jul 29 15:41:25.950: INFO: pod pod-service-account-defaultsa service account token volume mount: true - Jul 29 15:41:25.979: INFO: created pod pod-service-account-mountsa - Jul 29 15:41:25.979: INFO: pod pod-service-account-mountsa service account token volume mount: true - Jul 29 15:41:25.994: INFO: created pod pod-service-account-nomountsa - Jul 29 15:41:25.995: INFO: pod pod-service-account-nomountsa service account token volume mount: false - Jul 29 15:41:26.019: INFO: created pod pod-service-account-defaultsa-mountspec - Jul 29 15:41:26.019: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true - Jul 29 15:41:26.041: INFO: created pod pod-service-account-mountsa-mountspec - Jul 29 15:41:26.041: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true - Jul 29 15:41:26.061: INFO: created pod pod-service-account-nomountsa-mountspec - Jul 29 15:41:26.062: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true - Jul 29 15:41:26.071: INFO: created pod pod-service-account-defaultsa-nomountspec - Jul 29 15:41:26.072: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false - Jul 29 15:41:26.096: INFO: created pod pod-service-account-mountsa-nomountspec - Jul 29 15:41:26.096: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false - Jul 29 15:41:26.123: INFO: created pod pod-service-account-nomountsa-nomountspec - Jul 29 15:41:26.123: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false - [AfterEach] [sig-auth] ServiceAccounts + [AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 15:41:26.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + Aug 24 11:42:12.407: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "svcaccounts-5768" for 
this suite. 07/29/23 15:41:26.151 + STEP: Destroying namespace "daemonsets-790" for this suite. 08/24/23 11:42:12.415 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSS ------------------------------ [sig-storage] EmptyDir volumes - pod should support shared volumes between containers [Conformance] - test/e2e/common/storage/empty_dir.go:227 + should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:107 [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:41:26.21 -Jul 29 15:41:26.211: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 15:41:26.221 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:41:26.254 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:41:26.266 +STEP: Creating a kubernetes client 08/24/23 11:42:12.433 +Aug 24 11:42:12.433: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 11:42:12.436 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:42:12.47 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:42:12.477 [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[It] pod should support shared volumes between containers [Conformance] - test/e2e/common/storage/empty_dir.go:227 -STEP: Creating Pod 07/29/23 15:41:26.273 -Jul 29 15:41:26.296: INFO: Waiting up to 5m0s for pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e" in namespace "emptydir-9421" to be "running" -Jul 29 15:41:26.308: INFO: Pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e": Phase="Pending", Reason="", readiness=false. Elapsed: 12.33237ms -Jul 29 15:41:28.322: INFO: Pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.025864867s -Jul 29 15:41:30.323: INFO: Pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e": Phase="Running", Reason="", readiness=false. 
Elapsed: 4.026458319s -Jul 29 15:41:30.323: INFO: Pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e" satisfied condition "running" -STEP: Reading file content from the nginx-container 07/29/23 15:41:30.323 -Jul 29 15:41:30.323: INFO: ExecWithOptions {Command:[/bin/sh -c cat /usr/share/volumeshare/shareddata.txt] Namespace:emptydir-9421 PodName:pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e ContainerName:busybox-main-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 15:41:30.325: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 15:41:30.330: INFO: ExecWithOptions: Clientset creation -Jul 29 15:41:30.331: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/emptydir-9421/pods/pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e/exec?command=%2Fbin%2Fsh&command=-c&command=cat+%2Fusr%2Fshare%2Fvolumeshare%2Fshareddata.txt&container=busybox-main-container&container=busybox-main-container&stderr=true&stdout=true) -Jul 29 15:41:30.452: INFO: Exec stderr: "" +[It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:107 +STEP: Creating a pod to test emptydir 0666 on tmpfs 08/24/23 11:42:12.483 +Aug 24 11:42:12.496: INFO: Waiting up to 5m0s for pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b" in namespace "emptydir-7113" to be "Succeeded or Failed" +Aug 24 11:42:12.502: INFO: Pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b": Phase="Pending", Reason="", readiness=false. Elapsed: 5.929422ms +Aug 24 11:42:14.518: INFO: Pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021613182s +Aug 24 11:42:16.531: INFO: Pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.034391378s +STEP: Saw pod success 08/24/23 11:42:16.531 +Aug 24 11:42:16.532: INFO: Pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b" satisfied condition "Succeeded or Failed" +Aug 24 11:42:16.539: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-7803c93c-9051-4327-a2e2-9c878715fe0b container test-container: +STEP: delete the pod 08/24/23 11:42:16.551 +Aug 24 11:42:16.572: INFO: Waiting for pod pod-7803c93c-9051-4327-a2e2-9c878715fe0b to disappear +Aug 24 11:42:16.579: INFO: Pod pod-7803c93c-9051-4327-a2e2-9c878715fe0b no longer exists [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 15:41:30.453: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 11:42:16.579: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-9421" for this suite. 07/29/23 15:41:30.463 +STEP: Destroying namespace "emptydir-7113" for this suite. 
08/24/23 11:42:16.592 ------------------------------ -• [4.271 seconds] +• [4.172 seconds] [sig-storage] EmptyDir volumes test/e2e/common/storage/framework.go:23 - pod should support shared volumes between containers [Conformance] - test/e2e/common/storage/empty_dir.go:227 + should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:107 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:41:26.21 - Jul 29 15:41:26.211: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 15:41:26.221 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:41:26.254 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:41:26.266 + STEP: Creating a kubernetes client 08/24/23 11:42:12.433 + Aug 24 11:42:12.433: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 11:42:12.436 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:42:12.47 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:42:12.477 [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [It] pod should support shared volumes between containers [Conformance] - test/e2e/common/storage/empty_dir.go:227 - STEP: Creating Pod 07/29/23 15:41:26.273 - Jul 29 15:41:26.296: INFO: Waiting up to 5m0s for pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e" in namespace "emptydir-9421" to be "running" - Jul 29 15:41:26.308: INFO: Pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e": Phase="Pending", Reason="", readiness=false. Elapsed: 12.33237ms - Jul 29 15:41:28.322: INFO: Pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.025864867s - Jul 29 15:41:30.323: INFO: Pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e": Phase="Running", Reason="", readiness=false. 
Elapsed: 4.026458319s - Jul 29 15:41:30.323: INFO: Pod "pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e" satisfied condition "running" - STEP: Reading file content from the nginx-container 07/29/23 15:41:30.323 - Jul 29 15:41:30.323: INFO: ExecWithOptions {Command:[/bin/sh -c cat /usr/share/volumeshare/shareddata.txt] Namespace:emptydir-9421 PodName:pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e ContainerName:busybox-main-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 15:41:30.325: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 15:41:30.330: INFO: ExecWithOptions: Clientset creation - Jul 29 15:41:30.331: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/emptydir-9421/pods/pod-sharedvolume-910025f2-bd19-4079-a3e7-117d1335df3e/exec?command=%2Fbin%2Fsh&command=-c&command=cat+%2Fusr%2Fshare%2Fvolumeshare%2Fshareddata.txt&container=busybox-main-container&container=busybox-main-container&stderr=true&stdout=true) - Jul 29 15:41:30.452: INFO: Exec stderr: "" + [It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:107 + STEP: Creating a pod to test emptydir 0666 on tmpfs 08/24/23 11:42:12.483 + Aug 24 11:42:12.496: INFO: Waiting up to 5m0s for pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b" in namespace "emptydir-7113" to be "Succeeded or Failed" + Aug 24 11:42:12.502: INFO: Pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b": Phase="Pending", Reason="", readiness=false. Elapsed: 5.929422ms + Aug 24 11:42:14.518: INFO: Pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021613182s + Aug 24 11:42:16.531: INFO: Pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.034391378s + STEP: Saw pod success 08/24/23 11:42:16.531 + Aug 24 11:42:16.532: INFO: Pod "pod-7803c93c-9051-4327-a2e2-9c878715fe0b" satisfied condition "Succeeded or Failed" + Aug 24 11:42:16.539: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-7803c93c-9051-4327-a2e2-9c878715fe0b container test-container: + STEP: delete the pod 08/24/23 11:42:16.551 + Aug 24 11:42:16.572: INFO: Waiting for pod pod-7803c93c-9051-4327-a2e2-9c878715fe0b to disappear + Aug 24 11:42:16.579: INFO: Pod pod-7803c93c-9051-4327-a2e2-9c878715fe0b no longer exists [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 15:41:30.453: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 11:42:16.579: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-9421" for this suite. 07/29/23 15:41:30.463 + STEP: Destroying namespace "emptydir-7113" for this suite. 
08/24/23 11:42:16.592 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Aggregator - Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] - test/e2e/apimachinery/aggregator.go:100 -[BeforeEach] [sig-api-machinery] Aggregator +[sig-network] EndpointSliceMirroring + should mirror a custom Endpoints resource through create update and delete [Conformance] + test/e2e/network/endpointslicemirroring.go:53 +[BeforeEach] [sig-network] EndpointSliceMirroring set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:41:30.485 -Jul 29 15:41:30.485: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename aggregator 07/29/23 15:41:30.488 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:41:30.516 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:41:30.521 -[BeforeEach] [sig-api-machinery] Aggregator +STEP: Creating a kubernetes client 08/24/23 11:42:16.611 +Aug 24 11:42:16.611: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename endpointslicemirroring 08/24/23 11:42:16.613 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:42:16.647 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:42:16.651 +[BeforeEach] [sig-network] EndpointSliceMirroring test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] Aggregator - test/e2e/apimachinery/aggregator.go:78 -Jul 29 15:41:30.526: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -[It] Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] - test/e2e/apimachinery/aggregator.go:100 -STEP: Registering the sample API server. 
07/29/23 15:41:30.529 -Jul 29 15:41:31.384: INFO: deployment "sample-apiserver-deployment" doesn't have the required revision set -Jul 29 15:41:33.473: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:35.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:37.484: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:39.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet 
\"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:41.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:43.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:45.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:47.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, 
CollisionCount:(*int32)(nil)} -Jul 29 15:41:49.490: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:51.489: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:53.484: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:55.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:57.484: INFO: 
deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:41:59.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:01.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:03.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:05.480: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, 
Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:07.487: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:09.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:11.480: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:13.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, 
AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:15.484: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:17.502: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:19.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:21.480: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:23.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:25.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:27.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:29.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:31.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:33.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:35.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:37.480: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:39.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:41.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:43.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:45.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:47.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 15:42:49.732: INFO: Waited 237.049211ms for the sample-apiserver to be ready to handle requests. -STEP: Read Status for v1alpha1.wardle.example.com 07/29/23 15:42:49.817 -STEP: kubectl patch apiservice v1alpha1.wardle.example.com -p '{"spec":{"versionPriority": 400}}' 07/29/23 15:42:49.823 -STEP: List APIServices 07/29/23 15:42:49.836 -Jul 29 15:42:49.849: INFO: Found v1alpha1.wardle.example.com in APIServiceList -[AfterEach] [sig-api-machinery] Aggregator - test/e2e/apimachinery/aggregator.go:68 -[AfterEach] [sig-api-machinery] Aggregator +[BeforeEach] [sig-network] EndpointSliceMirroring + test/e2e/network/endpointslicemirroring.go:41 +[It] should mirror a custom Endpoints resource through create update and delete [Conformance] + test/e2e/network/endpointslicemirroring.go:53 +STEP: mirroring a new custom Endpoint 08/24/23 11:42:16.672 +Aug 24 11:42:16.690: INFO: Waiting for at least 1 EndpointSlice to exist, got 0 +STEP: mirroring an update to a custom Endpoint 08/24/23 11:42:18.701 +STEP: mirroring deletion of a custom Endpoint 08/24/23 11:42:18.724 +Aug 24 11:42:18.750: INFO: Waiting for 0 EndpointSlices to exist, got 1 +[AfterEach] [sig-network] EndpointSliceMirroring test/e2e/framework/node/init/init.go:32 -Jul 29 15:42:50.120: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Aggregator +Aug 24 11:42:20.756: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] EndpointSliceMirroring test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Aggregator +[DeferCleanup (Each)] [sig-network] EndpointSliceMirroring dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Aggregator +[DeferCleanup (Each)] [sig-network] EndpointSliceMirroring tear down framework | framework.go:193 -STEP: Destroying namespace "aggregator-3668" for this suite. 
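For context on the removed aggregator run above: once the sample-apiserver finally serves requests, the suite patches the registered APIService's versionPriority to 400 and lists APIServices to confirm v1alpha1.wardle.example.com is present. For readers reproducing that patch step outside the suite, a minimal Go sketch with the kube-aggregator clientset might look as follows; the kubeconfig path is illustrative and the error handling is deliberately blunt:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/clientcmd"
	aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
)

func main() {
	// Illustrative kubeconfig path; the suite reads its own temp file.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := aggregator.NewForConfigOrDie(cfg)

	// Merge-patch spec.versionPriority to 400, mirroring the captured
	// `kubectl patch apiservice ...` step in the log above.
	patch := []byte(`{"spec":{"versionPriority":400}}`)
	_, err = client.ApiregistrationV1().APIServices().Patch(
		context.TODO(), "v1alpha1.wardle.example.com",
		types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
}

A merge patch keeps the request minimal: only spec.versionPriority changes, which is exactly what the captured kubectl invocation does.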
07/29/23 15:42:50.131
+STEP: Destroying namespace "endpointslicemirroring-36" for this suite. 08/24/23 11:42:20.764
------------------------------
-• [SLOW TEST] [79.658 seconds]
-[sig-api-machinery] Aggregator
-test/e2e/apimachinery/framework.go:23
-  Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance]
-  test/e2e/apimachinery/aggregator.go:100
+• [4.165 seconds]
+[sig-network] EndpointSliceMirroring
+test/e2e/network/common/framework.go:23
+  should mirror a custom Endpoints resource through create update and delete [Conformance]
+  test/e2e/network/endpointslicemirroring.go:53
 Begin Captured GinkgoWriter Output >>
-  [BeforeEach] [sig-api-machinery] Aggregator
+  [BeforeEach] [sig-network] EndpointSliceMirroring
     set up framework | framework.go:178
-  STEP: Creating a kubernetes client 07/29/23 15:41:30.485
-  Jul 29 15:41:30.485: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-  STEP: Building a namespace api object, basename aggregator 07/29/23 15:41:30.488
-  STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:41:30.516
-  STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:41:30.521
-  [BeforeEach] [sig-api-machinery] Aggregator
+  STEP: Creating a kubernetes client 08/24/23 11:42:16.611
+  Aug 24 11:42:16.611: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+  STEP: Building a namespace api object, basename endpointslicemirroring 08/24/23 11:42:16.613
+  STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:42:16.647
+  STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:42:16.651
+  [BeforeEach] [sig-network] EndpointSliceMirroring
     test/e2e/framework/metrics/init/init.go:31
-  [BeforeEach] [sig-api-machinery] Aggregator
-    test/e2e/apimachinery/aggregator.go:78
-  Jul 29 15:41:30.526: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-  [It] Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance]
-    test/e2e/apimachinery/aggregator.go:100
-  STEP: Registering the sample API server.
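The captured GinkgoWriter output that follows repeats the suite's two-second poll of sample-apiserver-deployment until its Available condition turns True. As a rough standalone equivalent of that wait loop, a client-go sketch; the namespace and kubeconfig path are illustrative, not taken from the suite:

package main

import (
	"context"
	"fmt"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; the suite reads its own temp file.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	ns := "default" // illustrative; the suite uses a generated test namespace

	// Poll every 2s, matching the cadence of the log records, until the
	// Deployment reports Available=True or the timeout expires.
	err = wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		d, err := client.AppsV1().Deployments(ns).Get(context.TODO(),
			"sample-apiserver-deployment", metav1.GetOptions{})
		if err != nil {
			return false, nil // tolerate transient errors and keep polling
		}
		fmt.Printf("deployment status: %+v\n", d.Status)
		for _, c := range d.Status.Conditions {
			if c.Type == appsv1.DeploymentAvailable && c.Status == corev1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		panic(err)
	}
}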
07/29/23 15:41:30.529 - Jul 29 15:41:31.384: INFO: deployment "sample-apiserver-deployment" doesn't have the required revision set - Jul 29 15:41:33.473: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:35.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:37.484: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:39.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet 
\"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:41.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:43.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:45.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:47.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is 
progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:49.490: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:51.489: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:53.484: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:55.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 
15:41:57.484: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:41:59.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:01.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:03.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:05.480: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:07.487: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:09.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:11.480: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:13.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, 
UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:15.484: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:17.502: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:19.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:21.480: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, 
UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:23.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:25.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:27.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:29.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:31.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:33.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:35.483: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:37.480: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:39.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:41.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:43.482: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:45.481: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:47.485: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 15, 41, 31, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 15:42:49.732: INFO: Waited 237.049211ms for the sample-apiserver to be ready to handle requests. - STEP: Read Status for v1alpha1.wardle.example.com 07/29/23 15:42:49.817 - STEP: kubectl patch apiservice v1alpha1.wardle.example.com -p '{"spec":{"versionPriority": 400}}' 07/29/23 15:42:49.823 - STEP: List APIServices 07/29/23 15:42:49.836 - Jul 29 15:42:49.849: INFO: Found v1alpha1.wardle.example.com in APIServiceList - [AfterEach] [sig-api-machinery] Aggregator - test/e2e/apimachinery/aggregator.go:68 - [AfterEach] [sig-api-machinery] Aggregator + [BeforeEach] [sig-network] EndpointSliceMirroring + test/e2e/network/endpointslicemirroring.go:41 + [It] should mirror a custom Endpoints resource through create update and delete [Conformance] + test/e2e/network/endpointslicemirroring.go:53 + STEP: mirroring a new custom Endpoint 08/24/23 11:42:16.672 + Aug 24 11:42:16.690: INFO: Waiting for at least 1 EndpointSlice to exist, got 0 + STEP: mirroring an update to a custom Endpoint 08/24/23 11:42:18.701 + STEP: mirroring deletion of a custom Endpoint 08/24/23 11:42:18.724 + Aug 24 11:42:18.750: INFO: Waiting for 0 EndpointSlices to exist, got 1 + [AfterEach] [sig-network] EndpointSliceMirroring test/e2e/framework/node/init/init.go:32 - Jul 29 15:42:50.120: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Aggregator + Aug 24 11:42:20.756: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] EndpointSliceMirroring test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Aggregator + [DeferCleanup (Each)] [sig-network] EndpointSliceMirroring dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Aggregator + [DeferCleanup (Each)] [sig-network] EndpointSliceMirroring tear down framework | framework.go:193 - STEP: Destroying namespace "aggregator-3668" for this suite. 
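Interleaved with the removed aggregator teardown above are the new run's captured EndpointSliceMirroring steps: create, update, and delete a custom Endpoints object while the mirroring controller keeps EndpointSlices in sync. A condensed sketch of the create-and-verify half, assuming a reachable cluster; the object name, address, and namespace are illustrative, and real code would poll briefly, as the test does, rather than list once:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // illustrative path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	ns := "default" // illustrative namespace

	// A custom Endpoints object (not managed by the endpoints controller);
	// the EndpointSliceMirroring controller mirrors it into EndpointSlices.
	ep := &corev1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: "example-custom-endpoints"},
		Subsets: []corev1.EndpointSubset{{
			Addresses: []corev1.EndpointAddress{{IP: "10.0.0.10"}},
			Ports:     []corev1.EndpointPort{{Name: "http", Port: 80, Protocol: corev1.ProtocolTCP}},
		}},
	}
	if _, err := client.CoreV1().Endpoints(ns).Create(context.TODO(), ep, metav1.CreateOptions{}); err != nil {
		panic(err)
	}

	// Mirrored slices carry the kubernetes.io/service-name label.
	slices, err := client.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{
		LabelSelector: "kubernetes.io/service-name=example-custom-endpoints",
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("mirrored EndpointSlices: %d\n", len(slices.Items))
}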
07/29/23 15:42:50.131 + STEP: Destroying namespace "endpointslicemirroring-36" for this suite. 08/24/23 11:42:20.764 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-node] Probing container - should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:184 -[BeforeEach] [sig-node] Probing container - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:42:50.166 -Jul 29 15:42:50.166: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-probe 07/29/23 15:42:50.171 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:42:50.219 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:42:50.223 -[BeforeEach] [sig-node] Probing container +[sig-storage] Subpath Atomic writer volumes + should support subpaths with projected pod [Conformance] + test/e2e/storage/subpath.go:106 +[BeforeEach] [sig-storage] Subpath + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 11:42:20.778 +Aug 24 11:42:20.778: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename subpath 08/24/23 11:42:20.781 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:42:20.809 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:42:20.816 +[BeforeEach] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 -[It] should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:184 -STEP: Creating pod liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e in namespace container-probe-161 07/29/23 15:42:50.228 -Jul 29 15:42:50.245: INFO: Waiting up to 5m0s for pod "liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e" in namespace "container-probe-161" to be "not pending" -Jul 29 15:42:50.255: INFO: Pod "liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e": Phase="Pending", Reason="", readiness=false. Elapsed: 9.574344ms -Jul 29 15:42:52.265: INFO: Pod "liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020050885s -Jul 29 15:42:54.264: INFO: Pod "liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.01920368s -Jul 29 15:42:54.264: INFO: Pod "liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e" satisfied condition "not pending" -Jul 29 15:42:54.264: INFO: Started pod liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e in namespace container-probe-161 -STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 15:42:54.264 -Jul 29 15:42:54.270: INFO: Initial restart count of pod liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e is 0 -STEP: deleting the pod 07/29/23 15:46:55.498 -[AfterEach] [sig-node] Probing container +[BeforeEach] Atomic writer volumes + test/e2e/storage/subpath.go:40 +STEP: Setting up data 08/24/23 11:42:20.821 +[It] should support subpaths with projected pod [Conformance] + test/e2e/storage/subpath.go:106 +STEP: Creating pod pod-subpath-test-projected-d4g6 08/24/23 11:42:20.838 +STEP: Creating a pod to test atomic-volume-subpath 08/24/23 11:42:20.839 +Aug 24 11:42:20.855: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-d4g6" in namespace "subpath-855" to be "Succeeded or Failed" +Aug 24 11:42:20.870: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Pending", Reason="", readiness=false. Elapsed: 14.553165ms +Aug 24 11:42:22.879: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 2.023861689s +Aug 24 11:42:24.880: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 4.024412624s +Aug 24 11:42:26.880: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 6.024250691s +Aug 24 11:42:28.888: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 8.032609071s +Aug 24 11:42:30.882: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 10.026461422s +Aug 24 11:42:32.882: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 12.026184906s +Aug 24 11:42:34.883: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 14.027264497s +Aug 24 11:42:36.876: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 16.020733632s +Aug 24 11:42:38.879: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 18.023149428s +Aug 24 11:42:40.881: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 20.025053116s +Aug 24 11:42:42.880: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=false. Elapsed: 22.024434299s +Aug 24 11:42:44.882: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 24.02619949s +STEP: Saw pod success 08/24/23 11:42:44.882 +Aug 24 11:42:44.882: INFO: Pod "pod-subpath-test-projected-d4g6" satisfied condition "Succeeded or Failed" +Aug 24 11:42:44.892: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-projected-d4g6 container test-container-subpath-projected-d4g6: +STEP: delete the pod 08/24/23 11:42:44.908 +Aug 24 11:42:44.928: INFO: Waiting for pod pod-subpath-test-projected-d4g6 to disappear +Aug 24 11:42:44.935: INFO: Pod pod-subpath-test-projected-d4g6 no longer exists +STEP: Deleting pod pod-subpath-test-projected-d4g6 08/24/23 11:42:44.935 +Aug 24 11:42:44.936: INFO: Deleting pod "pod-subpath-test-projected-d4g6" in namespace "subpath-855" +[AfterEach] [sig-storage] Subpath test/e2e/framework/node/init/init.go:32 -Jul 29 15:46:55.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Probing container +Aug 24 11:42:44.941: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Probing container +[DeferCleanup (Each)] [sig-storage] Subpath dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Probing container +[DeferCleanup (Each)] [sig-storage] Subpath tear down framework | framework.go:193 -STEP: Destroying namespace "container-probe-161" for this suite. 07/29/23 15:46:55.549 +STEP: Destroying namespace "subpath-855" for this suite. 08/24/23 11:42:44.95 ------------------------------ -• [SLOW TEST] [245.407 seconds] -[sig-node] Probing container -test/e2e/common/node/framework.go:23 - should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:184 +• [SLOW TEST] [24.183 seconds] +[sig-storage] Subpath +test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + test/e2e/storage/subpath.go:36 + should support subpaths with projected pod [Conformance] + test/e2e/storage/subpath.go:106 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Probing container + [BeforeEach] [sig-storage] Subpath set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:42:50.166 - Jul 29 15:42:50.166: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-probe 07/29/23 15:42:50.171 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:42:50.219 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:42:50.223 - [BeforeEach] [sig-node] Probing container + STEP: Creating a kubernetes client 08/24/23 11:42:20.778 + Aug 24 11:42:20.778: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename subpath 08/24/23 11:42:20.781 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:42:20.809 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:42:20.816 + [BeforeEach] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 - [It] should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:184 - STEP: Creating pod liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e in namespace container-probe-161 07/29/23 15:42:50.228 - Jul 29 15:42:50.245: INFO: Waiting up to 5m0s for pod 
"liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e" in namespace "container-probe-161" to be "not pending" - Jul 29 15:42:50.255: INFO: Pod "liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e": Phase="Pending", Reason="", readiness=false. Elapsed: 9.574344ms - Jul 29 15:42:52.265: INFO: Pod "liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020050885s - Jul 29 15:42:54.264: INFO: Pod "liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e": Phase="Running", Reason="", readiness=true. Elapsed: 4.01920368s - Jul 29 15:42:54.264: INFO: Pod "liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e" satisfied condition "not pending" - Jul 29 15:42:54.264: INFO: Started pod liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e in namespace container-probe-161 - STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 15:42:54.264 - Jul 29 15:42:54.270: INFO: Initial restart count of pod liveness-c6bcee84-9940-496d-a56c-da49a1b92b8e is 0 - STEP: deleting the pod 07/29/23 15:46:55.498 - [AfterEach] [sig-node] Probing container + [BeforeEach] Atomic writer volumes + test/e2e/storage/subpath.go:40 + STEP: Setting up data 08/24/23 11:42:20.821 + [It] should support subpaths with projected pod [Conformance] + test/e2e/storage/subpath.go:106 + STEP: Creating pod pod-subpath-test-projected-d4g6 08/24/23 11:42:20.838 + STEP: Creating a pod to test atomic-volume-subpath 08/24/23 11:42:20.839 + Aug 24 11:42:20.855: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-d4g6" in namespace "subpath-855" to be "Succeeded or Failed" + Aug 24 11:42:20.870: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Pending", Reason="", readiness=false. Elapsed: 14.553165ms + Aug 24 11:42:22.879: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 2.023861689s + Aug 24 11:42:24.880: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 4.024412624s + Aug 24 11:42:26.880: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 6.024250691s + Aug 24 11:42:28.888: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 8.032609071s + Aug 24 11:42:30.882: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 10.026461422s + Aug 24 11:42:32.882: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 12.026184906s + Aug 24 11:42:34.883: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 14.027264497s + Aug 24 11:42:36.876: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 16.020733632s + Aug 24 11:42:38.879: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 18.023149428s + Aug 24 11:42:40.881: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=true. Elapsed: 20.025053116s + Aug 24 11:42:42.880: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Running", Reason="", readiness=false. Elapsed: 22.024434299s + Aug 24 11:42:44.882: INFO: Pod "pod-subpath-test-projected-d4g6": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 24.02619949s + STEP: Saw pod success 08/24/23 11:42:44.882 + Aug 24 11:42:44.882: INFO: Pod "pod-subpath-test-projected-d4g6" satisfied condition "Succeeded or Failed" + Aug 24 11:42:44.892: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-projected-d4g6 container test-container-subpath-projected-d4g6: + STEP: delete the pod 08/24/23 11:42:44.908 + Aug 24 11:42:44.928: INFO: Waiting for pod pod-subpath-test-projected-d4g6 to disappear + Aug 24 11:42:44.935: INFO: Pod pod-subpath-test-projected-d4g6 no longer exists + STEP: Deleting pod pod-subpath-test-projected-d4g6 08/24/23 11:42:44.935 + Aug 24 11:42:44.936: INFO: Deleting pod "pod-subpath-test-projected-d4g6" in namespace "subpath-855" + [AfterEach] [sig-storage] Subpath test/e2e/framework/node/init/init.go:32 - Jul 29 15:46:55.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Probing container + Aug 24 11:42:44.941: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] [sig-storage] Subpath dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] [sig-storage] Subpath tear down framework | framework.go:193 - STEP: Destroying namespace "container-probe-161" for this suite. 07/29/23 15:46:55.549 + STEP: Destroying namespace "subpath-855" for this suite. 08/24/23 11:42:44.95 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should provide podname only [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:53 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-network] Services + should serve a basic endpoint from pods [Conformance] + test/e2e/network/service.go:787 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:46:55.588 -Jul 29 15:46:55.588: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 15:46:55.592 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:46:55.643 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:46:55.652 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 11:42:44.963 +Aug 24 11:42:44.963: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 11:42:44.965 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:42:44.996 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:42:45.007 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should provide podname only [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:53 -STEP: Creating a pod to test downward API volume plugin 07/29/23 15:46:55.66 -Jul 29 15:46:55.696: INFO: Waiting up to 5m0s for pod "downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6" in namespace "projected-9403" to be "Succeeded or Failed" -Jul 29 15:46:55.725: INFO: Pod 
"downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6": Phase="Pending", Reason="", readiness=false. Elapsed: 28.929721ms -Jul 29 15:46:57.735: INFO: Pod "downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.03935113s -Jul 29 15:46:59.735: INFO: Pod "downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.038982885s -STEP: Saw pod success 07/29/23 15:46:59.735 -Jul 29 15:46:59.735: INFO: Pod "downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6" satisfied condition "Succeeded or Failed" -Jul 29 15:46:59.742: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6 container client-container: -STEP: delete the pod 07/29/23 15:46:59.778 -Jul 29 15:46:59.805: INFO: Waiting for pod downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6 to disappear -Jul 29 15:46:59.811: INFO: Pod downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should serve a basic endpoint from pods [Conformance] + test/e2e/network/service.go:787 +STEP: creating service endpoint-test2 in namespace services-3037 08/24/23 11:42:45.012 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[] 08/24/23 11:42:45.033 +Aug 24 11:42:45.050: INFO: Failed go get Endpoints object: endpoints "endpoint-test2" not found +Aug 24 11:42:46.066: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[] +STEP: Creating pod pod1 in namespace services-3037 08/24/23 11:42:46.066 +Aug 24 11:42:46.084: INFO: Waiting up to 5m0s for pod "pod1" in namespace "services-3037" to be "running and ready" +Aug 24 11:42:46.092: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 7.855046ms +Aug 24 11:42:46.092: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:42:48.099: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.014771713s +Aug 24 11:42:48.099: INFO: The phase of Pod pod1 is Running (Ready = true) +Aug 24 11:42:48.099: INFO: Pod "pod1" satisfied condition "running and ready" +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[pod1:[80]] 08/24/23 11:42:48.105 +Aug 24 11:42:48.137: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[pod1:[80]] +STEP: Checking if the Service forwards traffic to pod1 08/24/23 11:42:48.137 +Aug 24 11:42:48.138: INFO: Creating new exec pod +Aug 24 11:42:48.152: INFO: Waiting up to 5m0s for pod "execpodsvgdv" in namespace "services-3037" to be "running" +Aug 24 11:42:48.161: INFO: Pod "execpodsvgdv": Phase="Pending", Reason="", readiness=false. Elapsed: 9.104013ms +Aug 24 11:42:50.174: INFO: Pod "execpodsvgdv": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.021758893s +Aug 24 11:42:50.174: INFO: Pod "execpodsvgdv" satisfied condition "running" +Aug 24 11:42:51.176: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' +Aug 24 11:42:51.495: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" +Aug 24 11:42:51.496: INFO: stdout: "" +Aug 24 11:42:51.496: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 10.233.31.118 80' +Aug 24 11:42:51.736: INFO: stderr: "+ nc -v -z -w 2 10.233.31.118 80\nConnection to 10.233.31.118 80 port [tcp/http] succeeded!\n" +Aug 24 11:42:51.736: INFO: stdout: "" +STEP: Creating pod pod2 in namespace services-3037 08/24/23 11:42:51.736 +Aug 24 11:42:51.747: INFO: Waiting up to 5m0s for pod "pod2" in namespace "services-3037" to be "running and ready" +Aug 24 11:42:51.757: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 9.905344ms +Aug 24 11:42:51.757: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:42:53.767: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020571959s +Aug 24 11:42:53.768: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:42:55.766: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018963437s +Aug 24 11:42:55.766: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:42:57.767: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 6.019811529s +Aug 24 11:42:57.767: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:42:59.765: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 8.018620494s +Aug 24 11:42:59.766: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:43:01.765: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 10.018643718s +Aug 24 11:43:01.766: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:43:03.764: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. 
Elapsed: 12.016763586s +Aug 24 11:43:03.764: INFO: The phase of Pod pod2 is Running (Ready = true) +Aug 24 11:43:03.764: INFO: Pod "pod2" satisfied condition "running and ready" +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[pod1:[80] pod2:[80]] 08/24/23 11:43:03.769 +Aug 24 11:43:03.790: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[pod1:[80] pod2:[80]] +STEP: Checking if the Service forwards traffic to pod1 and pod2 08/24/23 11:43:03.79 +Aug 24 11:43:04.791: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' +Aug 24 11:43:05.089: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" +Aug 24 11:43:05.089: INFO: stdout: "" +Aug 24 11:43:05.090: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 10.233.31.118 80' +Aug 24 11:43:05.393: INFO: stderr: "+ nc -v -z -w 2 10.233.31.118 80\nConnection to 10.233.31.118 80 port [tcp/http] succeeded!\n" +Aug 24 11:43:05.393: INFO: stdout: "" +STEP: Deleting pod pod1 in namespace services-3037 08/24/23 11:43:05.393 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[pod2:[80]] 08/24/23 11:43:05.423 +Aug 24 11:43:06.495: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[pod2:[80]] +STEP: Checking if the Service forwards traffic to pod2 08/24/23 11:43:06.495 +Aug 24 11:43:07.498: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' +Aug 24 11:43:07.788: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" +Aug 24 11:43:07.788: INFO: stdout: "" +Aug 24 11:43:07.789: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 10.233.31.118 80' +Aug 24 11:43:08.037: INFO: stderr: "+ nc -v -z -w 2 10.233.31.118 80\nConnection to 10.233.31.118 80 port [tcp/http] succeeded!\n" +Aug 24 11:43:08.037: INFO: stdout: "" +STEP: Deleting pod pod2 in namespace services-3037 08/24/23 11:43:08.037 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[] 08/24/23 11:43:08.115 +Aug 24 11:43:09.155: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[] +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 15:46:59.812: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 11:43:09.217: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "projected-9403" for this suite. 
07/29/23 15:46:59.821 +STEP: Destroying namespace "services-3037" for this suite. 08/24/23 11:43:09.23 ------------------------------ -• [4.246 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should provide podname only [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:53 +• [SLOW TEST] [24.285 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should serve a basic endpoint from pods [Conformance] + test/e2e/network/service.go:787 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:46:55.588 - Jul 29 15:46:55.588: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 15:46:55.592 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:46:55.643 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:46:55.652 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 11:42:44.963 + Aug 24 11:42:44.963: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 11:42:44.965 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:42:44.996 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:42:45.007 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should provide podname only [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:53 - STEP: Creating a pod to test downward API volume plugin 07/29/23 15:46:55.66 - Jul 29 15:46:55.696: INFO: Waiting up to 5m0s for pod "downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6" in namespace "projected-9403" to be "Succeeded or Failed" - Jul 29 15:46:55.725: INFO: Pod "downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6": Phase="Pending", Reason="", readiness=false. Elapsed: 28.929721ms - Jul 29 15:46:57.735: INFO: Pod "downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.03935113s - Jul 29 15:46:59.735: INFO: Pod "downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.038982885s - STEP: Saw pod success 07/29/23 15:46:59.735 - Jul 29 15:46:59.735: INFO: Pod "downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6" satisfied condition "Succeeded or Failed" - Jul 29 15:46:59.742: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6 container client-container: - STEP: delete the pod 07/29/23 15:46:59.778 - Jul 29 15:46:59.805: INFO: Waiting for pod downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6 to disappear - Jul 29 15:46:59.811: INFO: Pod downwardapi-volume-3121092f-1d6e-4049-bd79-da4bbb8f31c6 no longer exists - [AfterEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should serve a basic endpoint from pods [Conformance] + test/e2e/network/service.go:787 + STEP: creating service endpoint-test2 in namespace services-3037 08/24/23 11:42:45.012 + STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[] 08/24/23 11:42:45.033 + Aug 24 11:42:45.050: INFO: Failed go get Endpoints object: endpoints "endpoint-test2" not found + Aug 24 11:42:46.066: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[] + STEP: Creating pod pod1 in namespace services-3037 08/24/23 11:42:46.066 + Aug 24 11:42:46.084: INFO: Waiting up to 5m0s for pod "pod1" in namespace "services-3037" to be "running and ready" + Aug 24 11:42:46.092: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 7.855046ms + Aug 24 11:42:46.092: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:42:48.099: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.014771713s + Aug 24 11:42:48.099: INFO: The phase of Pod pod1 is Running (Ready = true) + Aug 24 11:42:48.099: INFO: Pod "pod1" satisfied condition "running and ready" + STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[pod1:[80]] 08/24/23 11:42:48.105 + Aug 24 11:42:48.137: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[pod1:[80]] + STEP: Checking if the Service forwards traffic to pod1 08/24/23 11:42:48.137 + Aug 24 11:42:48.138: INFO: Creating new exec pod + Aug 24 11:42:48.152: INFO: Waiting up to 5m0s for pod "execpodsvgdv" in namespace "services-3037" to be "running" + Aug 24 11:42:48.161: INFO: Pod "execpodsvgdv": Phase="Pending", Reason="", readiness=false. Elapsed: 9.104013ms + Aug 24 11:42:50.174: INFO: Pod "execpodsvgdv": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.021758893s + Aug 24 11:42:50.174: INFO: Pod "execpodsvgdv" satisfied condition "running" + Aug 24 11:42:51.176: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' + Aug 24 11:42:51.495: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" + Aug 24 11:42:51.496: INFO: stdout: "" + Aug 24 11:42:51.496: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 10.233.31.118 80' + Aug 24 11:42:51.736: INFO: stderr: "+ nc -v -z -w 2 10.233.31.118 80\nConnection to 10.233.31.118 80 port [tcp/http] succeeded!\n" + Aug 24 11:42:51.736: INFO: stdout: "" + STEP: Creating pod pod2 in namespace services-3037 08/24/23 11:42:51.736 + Aug 24 11:42:51.747: INFO: Waiting up to 5m0s for pod "pod2" in namespace "services-3037" to be "running and ready" + Aug 24 11:42:51.757: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 9.905344ms + Aug 24 11:42:51.757: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:42:53.767: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020571959s + Aug 24 11:42:53.768: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:42:55.766: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018963437s + Aug 24 11:42:55.766: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:42:57.767: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 6.019811529s + Aug 24 11:42:57.767: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:42:59.765: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 8.018620494s + Aug 24 11:42:59.766: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:43:01.765: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 10.018643718s + Aug 24 11:43:01.766: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:43:03.764: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. 
Elapsed: 12.016763586s + Aug 24 11:43:03.764: INFO: The phase of Pod pod2 is Running (Ready = true) + Aug 24 11:43:03.764: INFO: Pod "pod2" satisfied condition "running and ready" + STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[pod1:[80] pod2:[80]] 08/24/23 11:43:03.769 + Aug 24 11:43:03.790: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[pod1:[80] pod2:[80]] + STEP: Checking if the Service forwards traffic to pod1 and pod2 08/24/23 11:43:03.79 + Aug 24 11:43:04.791: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' + Aug 24 11:43:05.089: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" + Aug 24 11:43:05.089: INFO: stdout: "" + Aug 24 11:43:05.090: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 10.233.31.118 80' + Aug 24 11:43:05.393: INFO: stderr: "+ nc -v -z -w 2 10.233.31.118 80\nConnection to 10.233.31.118 80 port [tcp/http] succeeded!\n" + Aug 24 11:43:05.393: INFO: stdout: "" + STEP: Deleting pod pod1 in namespace services-3037 08/24/23 11:43:05.393 + STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[pod2:[80]] 08/24/23 11:43:05.423 + Aug 24 11:43:06.495: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[pod2:[80]] + STEP: Checking if the Service forwards traffic to pod2 08/24/23 11:43:06.495 + Aug 24 11:43:07.498: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' + Aug 24 11:43:07.788: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" + Aug 24 11:43:07.788: INFO: stdout: "" + Aug 24 11:43:07.789: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-3037 exec execpodsvgdv -- /bin/sh -x -c nc -v -z -w 2 10.233.31.118 80' + Aug 24 11:43:08.037: INFO: stderr: "+ nc -v -z -w 2 10.233.31.118 80\nConnection to 10.233.31.118 80 port [tcp/http] succeeded!\n" + Aug 24 11:43:08.037: INFO: stdout: "" + STEP: Deleting pod pod2 in namespace services-3037 08/24/23 11:43:08.037 + STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-3037 to expose endpoints map[] 08/24/23 11:43:08.115 + Aug 24 11:43:09.155: INFO: successfully validated that service endpoint-test2 in namespace services-3037 exposes endpoints map[] + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 15:46:59.812: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 11:43:09.217: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "projected-9403" for this suite. 
07/29/23 15:46:59.821 + STEP: Destroying namespace "services-3037" for this suite. 08/24/23 11:43:09.23 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSSSSSSSS ------------------------------ -[sig-node] Pods - should run through the lifecycle of Pods and PodStatus [Conformance] - test/e2e/common/node/pods.go:896 -[BeforeEach] [sig-node] Pods +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition + getting/updating/patching custom resource definition status sub-resource works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:145 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:46:59.843 -Jul 29 15:46:59.843: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 15:46:59.847 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:46:59.916 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:46:59.922 -[BeforeEach] [sig-node] Pods +STEP: Creating a kubernetes client 08/24/23 11:43:09.249 +Aug 24 11:43:09.250: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 11:43:09.254 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:43:09.284 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:43:09.291 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should run through the lifecycle of Pods and PodStatus [Conformance] - test/e2e/common/node/pods.go:896 -STEP: creating a Pod with a static label 07/29/23 15:46:59.94 -STEP: watching for Pod to be ready 07/29/23 15:46:59.959 -Jul 29 15:46:59.962: INFO: observed Pod pod-test in namespace pods-4914 in phase Pending with labels: map[test-pod-static:true] & conditions [] -Jul 29 15:46:59.964: INFO: observed Pod pod-test in namespace pods-4914 in phase Pending with labels: map[test-pod-static:true] & conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC }] -Jul 29 15:46:59.990: INFO: observed Pod pod-test in namespace pods-4914 in phase Pending with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC }] -Jul 29 15:47:01.442: INFO: Found Pod pod-test in namespace pods-4914 in phase Running with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:47:01 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:47:01 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC }] -STEP: patching the Pod with a new Label and updated data 07/29/23 15:47:01.449 
-STEP: getting the Pod and ensuring that it's patched 07/29/23 15:47:01.475 -STEP: replacing the Pod's status Ready condition to False 07/29/23 15:47:01.488 -STEP: check the Pod again to ensure its Ready conditions are False 07/29/23 15:47:01.516 -STEP: deleting the Pod via a Collection with a LabelSelector 07/29/23 15:47:01.517 -STEP: watching for the Pod to be deleted 07/29/23 15:47:01.535 -Jul 29 15:47:01.543: INFO: observed event type MODIFIED -Jul 29 15:47:03.445: INFO: observed event type MODIFIED -Jul 29 15:47:04.462: INFO: observed event type MODIFIED -Jul 29 15:47:04.485: INFO: observed event type MODIFIED -[AfterEach] [sig-node] Pods +[It] getting/updating/patching custom resource definition status sub-resource works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:145 +Aug 24 11:43:09.299: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 15:47:04.496: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods +Aug 24 11:43:09.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "pods-4914" for this suite. 07/29/23 15:47:04.509 +STEP: Destroying namespace "custom-resource-definition-6027" for this suite. 
08/24/23 11:43:09.875 ------------------------------ -• [4.679 seconds] -[sig-node] Pods -test/e2e/common/node/framework.go:23 - should run through the lifecycle of Pods and PodStatus [Conformance] - test/e2e/common/node/pods.go:896 +• [0.646 seconds] +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + Simple CustomResourceDefinition + test/e2e/apimachinery/custom_resource_definition.go:50 + getting/updating/patching custom resource definition status sub-resource works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:145 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods + [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:46:59.843 - Jul 29 15:46:59.843: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 15:46:59.847 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:46:59.916 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:46:59.922 - [BeforeEach] [sig-node] Pods + STEP: Creating a kubernetes client 08/24/23 11:43:09.249 + Aug 24 11:43:09.250: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 11:43:09.254 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:43:09.284 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:43:09.291 + [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should run through the lifecycle of Pods and PodStatus [Conformance] - test/e2e/common/node/pods.go:896 - STEP: creating a Pod with a static label 07/29/23 15:46:59.94 - STEP: watching for Pod to be ready 07/29/23 15:46:59.959 - Jul 29 15:46:59.962: INFO: observed Pod pod-test in namespace pods-4914 in phase Pending with labels: map[test-pod-static:true] & conditions [] - Jul 29 15:46:59.964: INFO: observed Pod pod-test in namespace pods-4914 in phase Pending with labels: map[test-pod-static:true] & conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC }] - Jul 29 15:46:59.990: INFO: observed Pod pod-test in namespace pods-4914 in phase Pending with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC }] - Jul 29 15:47:01.442: INFO: Found Pod pod-test in namespace pods-4914 in phase Running with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:47:01 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:47:01 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 15:46:59 +0000 UTC }] - STEP: patching the Pod with 
a new Label and updated data 07/29/23 15:47:01.449 - STEP: getting the Pod and ensuring that it's patched 07/29/23 15:47:01.475 - STEP: replacing the Pod's status Ready condition to False 07/29/23 15:47:01.488 - STEP: check the Pod again to ensure its Ready conditions are False 07/29/23 15:47:01.516 - STEP: deleting the Pod via a Collection with a LabelSelector 07/29/23 15:47:01.517 - STEP: watching for the Pod to be deleted 07/29/23 15:47:01.535 - Jul 29 15:47:01.543: INFO: observed event type MODIFIED - Jul 29 15:47:03.445: INFO: observed event type MODIFIED - Jul 29 15:47:04.462: INFO: observed event type MODIFIED - Jul 29 15:47:04.485: INFO: observed event type MODIFIED - [AfterEach] [sig-node] Pods + [It] getting/updating/patching custom resource definition status sub-resource works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:145 + Aug 24 11:43:09.299: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 15:47:04.496: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods + Aug 24 11:43:09.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "pods-4914" for this suite. 07/29/23 15:47:04.509 + STEP: Destroying namespace "custom-resource-definition-6027" for this suite. 
08/24/23 11:43:09.875 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] DisruptionController - should create a PodDisruptionBudget [Conformance] - test/e2e/apps/disruption.go:108 -[BeforeEach] [sig-apps] DisruptionController +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + listing validating webhooks should work [Conformance] + test/e2e/apimachinery/webhook.go:582 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:47:04.522 -Jul 29 15:47:04.522: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename disruption 07/29/23 15:47:04.526 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:47:04.555 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:47:04.56 -[BeforeEach] [sig-apps] DisruptionController +STEP: Creating a kubernetes client 08/24/23 11:43:09.898 +Aug 24 11:43:09.898: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 11:43:09.9 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:43:09.941 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:43:09.949 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] DisruptionController - test/e2e/apps/disruption.go:72 -[It] should create a PodDisruptionBudget [Conformance] - test/e2e/apps/disruption.go:108 -STEP: creating the pdb 07/29/23 15:47:04.564 -STEP: Waiting for the pdb to be processed 07/29/23 15:47:04.573 -STEP: updating the pdb 07/29/23 15:47:06.59 -STEP: Waiting for the pdb to be processed 07/29/23 15:47:06.605 -STEP: patching the pdb 07/29/23 15:47:06.614 -STEP: Waiting for the pdb to be processed 07/29/23 15:47:06.631 -STEP: Waiting for the pdb to be deleted 07/29/23 15:47:08.669 -[AfterEach] [sig-apps] DisruptionController +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 11:43:09.976 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 11:43:10.53 +STEP: Deploying the webhook pod 08/24/23 11:43:10.544 +STEP: Wait for the deployment to be ready 08/24/23 11:43:10.569 +Aug 24 11:43:10.599: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service 08/24/23 11:43:12.62 +STEP: Verifying the service has paired with the endpoint 08/24/23 11:43:12.635 +Aug 24 11:43:13.635: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] listing validating webhooks should work [Conformance] + test/e2e/apimachinery/webhook.go:582 +STEP: Listing all of the created validation webhooks 08/24/23 11:43:13.746 +STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 11:43:13.813 +STEP: Deleting the collection of validation webhooks 08/24/23 11:43:13.86 +STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 11:43:13.944 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 15:47:08.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup 
(Each)] [sig-apps] DisruptionController +Aug 24 11:43:13.962: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] DisruptionController +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] DisruptionController +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "disruption-4767" for this suite. 07/29/23 15:47:08.683 +STEP: Destroying namespace "webhook-9625" for this suite. 08/24/23 11:43:14.088 +STEP: Destroying namespace "webhook-9625-markers" for this suite. 08/24/23 11:43:14.154 ------------------------------ -• [4.173 seconds] -[sig-apps] DisruptionController -test/e2e/apps/framework.go:23 - should create a PodDisruptionBudget [Conformance] - test/e2e/apps/disruption.go:108 +• [4.280 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + listing validating webhooks should work [Conformance] + test/e2e/apimachinery/webhook.go:582 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] DisruptionController + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:47:04.522 - Jul 29 15:47:04.522: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename disruption 07/29/23 15:47:04.526 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:47:04.555 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:47:04.56 - [BeforeEach] [sig-apps] DisruptionController + STEP: Creating a kubernetes client 08/24/23 11:43:09.898 + Aug 24 11:43:09.898: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 11:43:09.9 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:43:09.941 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:43:09.949 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] DisruptionController - test/e2e/apps/disruption.go:72 - [It] should create a PodDisruptionBudget [Conformance] - test/e2e/apps/disruption.go:108 - STEP: creating the pdb 07/29/23 15:47:04.564 - STEP: Waiting for the pdb to be processed 07/29/23 15:47:04.573 - STEP: updating the pdb 07/29/23 15:47:06.59 - STEP: Waiting for the pdb to be processed 07/29/23 15:47:06.605 - STEP: patching the pdb 07/29/23 15:47:06.614 - STEP: Waiting for the pdb to be processed 07/29/23 15:47:06.631 - STEP: Waiting for the pdb to be deleted 07/29/23 15:47:08.669 - [AfterEach] [sig-apps] DisruptionController + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 11:43:09.976 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 11:43:10.53 + STEP: Deploying the webhook pod 08/24/23 11:43:10.544 + STEP: Wait for the deployment to be ready 
08/24/23 11:43:10.569 + Aug 24 11:43:10.599: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set + STEP: Deploying the webhook service 08/24/23 11:43:12.62 + STEP: Verifying the service has paired with the endpoint 08/24/23 11:43:12.635 + Aug 24 11:43:13.635: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] listing validating webhooks should work [Conformance] + test/e2e/apimachinery/webhook.go:582 + STEP: Listing all of the created validation webhooks 08/24/23 11:43:13.746 + STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 11:43:13.813 + STEP: Deleting the collection of validation webhooks 08/24/23 11:43:13.86 + STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 11:43:13.944 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 15:47:08.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] DisruptionController + Aug 24 11:43:13.962: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] DisruptionController + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] DisruptionController + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "disruption-4767" for this suite. 07/29/23 15:47:08.683 + STEP: Destroying namespace "webhook-9625" for this suite. 08/24/23 11:43:14.088 + STEP: Destroying namespace "webhook-9625-markers" for this suite. 08/24/23 11:43:14.154 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-node] PreStop - should call prestop when killing a pod [Conformance] - test/e2e/node/pre_stop.go:168 -[BeforeEach] [sig-node] PreStop +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should be able to deny attaching pod [Conformance] + test/e2e/apimachinery/webhook.go:209 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:47:08.705 -Jul 29 15:47:08.705: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename prestop 07/29/23 15:47:08.707 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:47:08.741 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:47:08.747 -[BeforeEach] [sig-node] PreStop - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] PreStop - test/e2e/node/pre_stop.go:159 -[It] should call prestop when killing a pod [Conformance] - test/e2e/node/pre_stop.go:168 -STEP: Creating server pod server in namespace prestop-8406 07/29/23 15:47:08.753 -STEP: Waiting for pods to come up. 07/29/23 15:47:08.772 -Jul 29 15:47:08.772: INFO: Waiting up to 5m0s for pod "server" in namespace "prestop-8406" to be "running" -Jul 29 15:47:08.781: INFO: Pod "server": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.132785ms -Jul 29 15:47:10.789: INFO: Pod "server": Phase="Running", Reason="", readiness=true. Elapsed: 2.016816411s -Jul 29 15:47:10.789: INFO: Pod "server" satisfied condition "running" -STEP: Creating tester pod tester in namespace prestop-8406 07/29/23 15:47:10.795 -Jul 29 15:47:10.804: INFO: Waiting up to 5m0s for pod "tester" in namespace "prestop-8406" to be "running" -Jul 29 15:47:10.837: INFO: Pod "tester": Phase="Pending", Reason="", readiness=false. Elapsed: 32.579285ms -Jul 29 15:47:12.847: INFO: Pod "tester": Phase="Running", Reason="", readiness=true. Elapsed: 2.043320082s -Jul 29 15:47:12.848: INFO: Pod "tester" satisfied condition "running" -STEP: Deleting pre-stop pod 07/29/23 15:47:12.848 -Jul 29 15:47:17.878: INFO: Saw: { - "Hostname": "server", - "Sent": null, - "Received": { - "prestop": 1 - }, - "Errors": null, - "Log": [ - "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", - "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up." - ], - "StillContactingPeers": true -} -STEP: Deleting the server pod 07/29/23 15:47:17.879 -[AfterEach] [sig-node] PreStop - test/e2e/framework/node/init/init.go:32 -Jul 29 15:47:17.904: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] PreStop - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] PreStop - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] PreStop - tear down framework | framework.go:193 -STEP: Destroying namespace "prestop-8406" for this suite. 07/29/23 15:47:17.917 ------------------------------- -• [SLOW TEST] [9.230 seconds] -[sig-node] PreStop -test/e2e/node/framework.go:23 - should call prestop when killing a pod [Conformance] - test/e2e/node/pre_stop.go:168 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] PreStop - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:47:08.705 - Jul 29 15:47:08.705: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename prestop 07/29/23 15:47:08.707 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:47:08.741 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:47:08.747 - [BeforeEach] [sig-node] PreStop - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] PreStop - test/e2e/node/pre_stop.go:159 - [It] should call prestop when killing a pod [Conformance] - test/e2e/node/pre_stop.go:168 - STEP: Creating server pod server in namespace prestop-8406 07/29/23 15:47:08.753 - STEP: Waiting for pods to come up. 07/29/23 15:47:08.772 - Jul 29 15:47:08.772: INFO: Waiting up to 5m0s for pod "server" in namespace "prestop-8406" to be "running" - Jul 29 15:47:08.781: INFO: Pod "server": Phase="Pending", Reason="", readiness=false. Elapsed: 8.132785ms - Jul 29 15:47:10.789: INFO: Pod "server": Phase="Running", Reason="", readiness=true. Elapsed: 2.016816411s - Jul 29 15:47:10.789: INFO: Pod "server" satisfied condition "running" - STEP: Creating tester pod tester in namespace prestop-8406 07/29/23 15:47:10.795 - Jul 29 15:47:10.804: INFO: Waiting up to 5m0s for pod "tester" in namespace "prestop-8406" to be "running" - Jul 29 15:47:10.837: INFO: Pod "tester": Phase="Pending", Reason="", readiness=false. Elapsed: 32.579285ms - Jul 29 15:47:12.847: INFO: Pod "tester": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.043320082s - Jul 29 15:47:12.848: INFO: Pod "tester" satisfied condition "running" - STEP: Deleting pre-stop pod 07/29/23 15:47:12.848 - Jul 29 15:47:17.878: INFO: Saw: { - "Hostname": "server", - "Sent": null, - "Received": { - "prestop": 1 - }, - "Errors": null, - "Log": [ - "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", - "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up." - ], - "StillContactingPeers": true - } - STEP: Deleting the server pod 07/29/23 15:47:17.879 - [AfterEach] [sig-node] PreStop - test/e2e/framework/node/init/init.go:32 - Jul 29 15:47:17.904: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] PreStop - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] PreStop - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] PreStop - tear down framework | framework.go:193 - STEP: Destroying namespace "prestop-8406" for this suite. 07/29/23 15:47:17.917 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] - should have a working scale subresource [Conformance] - test/e2e/apps/statefulset.go:848 -[BeforeEach] [sig-apps] StatefulSet - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:47:17.943 -Jul 29 15:47:17.943: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename statefulset 07/29/23 15:47:17.945 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:47:17.98 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:47:17.985 -[BeforeEach] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 -[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 -STEP: Creating service test in namespace statefulset-7235 07/29/23 15:47:17.992 -[It] should have a working scale subresource [Conformance] - test/e2e/apps/statefulset.go:848 -STEP: Creating statefulset ss in namespace statefulset-7235 07/29/23 15:47:18.004 -Jul 29 15:47:18.025: INFO: Found 0 stateful pods, waiting for 1 -Jul 29 15:47:28.034: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true -STEP: getting scale subresource 07/29/23 15:47:28.046 -STEP: updating a scale subresource 07/29/23 15:47:28.051 -STEP: verifying the statefulset Spec.Replicas was modified 07/29/23 15:47:28.067 -STEP: Patch a scale subresource 07/29/23 15:47:28.072 -STEP: verifying the statefulset Spec.Replicas was modified 07/29/23 15:47:28.09 -[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 -Jul 29 15:47:28.103: INFO: Deleting all statefulset in ns statefulset-7235 -Jul 29 15:47:28.119: INFO: Scaling statefulset ss to 0 -Jul 29 15:48:18.161: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 15:48:18.169: INFO: Deleting statefulset ss -[AfterEach] [sig-apps] StatefulSet - test/e2e/framework/node/init/init.go:32 -Jul 29 15:48:18.195: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] 
StatefulSet - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] StatefulSet - tear down framework | framework.go:193 -STEP: Destroying namespace "statefulset-7235" for this suite. 07/29/23 15:48:18.218 ------------------------------- -• [SLOW TEST] [60.289 seconds] -[sig-apps] StatefulSet -test/e2e/apps/framework.go:23 - Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:103 - should have a working scale subresource [Conformance] - test/e2e/apps/statefulset.go:848 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] StatefulSet - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:47:17.943 - Jul 29 15:47:17.943: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename statefulset 07/29/23 15:47:17.945 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:47:17.98 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:47:17.985 - [BeforeEach] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 - [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 - STEP: Creating service test in namespace statefulset-7235 07/29/23 15:47:17.992 - [It] should have a working scale subresource [Conformance] - test/e2e/apps/statefulset.go:848 - STEP: Creating statefulset ss in namespace statefulset-7235 07/29/23 15:47:18.004 - Jul 29 15:47:18.025: INFO: Found 0 stateful pods, waiting for 1 - Jul 29 15:47:28.034: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true - STEP: getting scale subresource 07/29/23 15:47:28.046 - STEP: updating a scale subresource 07/29/23 15:47:28.051 - STEP: verifying the statefulset Spec.Replicas was modified 07/29/23 15:47:28.067 - STEP: Patch a scale subresource 07/29/23 15:47:28.072 - STEP: verifying the statefulset Spec.Replicas was modified 07/29/23 15:47:28.09 - [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 - Jul 29 15:47:28.103: INFO: Deleting all statefulset in ns statefulset-7235 - Jul 29 15:47:28.119: INFO: Scaling statefulset ss to 0 - Jul 29 15:48:18.161: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 15:48:18.169: INFO: Deleting statefulset ss - [AfterEach] [sig-apps] StatefulSet - test/e2e/framework/node/init/init.go:32 - Jul 29 15:48:18.195: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] StatefulSet - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] StatefulSet - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] StatefulSet - tear down framework | framework.go:193 - STEP: Destroying namespace "statefulset-7235" for this suite. 
07/29/23 15:48:18.218 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSS ------------------------------- -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should be able to deny attaching pod [Conformance] - test/e2e/apimachinery/webhook.go:209 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:48:18.239 -Jul 29 15:48:18.239: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 15:48:18.243 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:48:18.269 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:48:18.279 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 11:43:14.179 +Aug 24 11:43:14.180: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 11:43:14.19 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:43:14.268 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:43:14.281 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 15:48:18.321 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 15:48:19.006 -STEP: Deploying the webhook pod 07/29/23 15:48:19.018 -STEP: Wait for the deployment to be ready 07/29/23 15:48:19.039 -Jul 29 15:48:19.068: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -STEP: Deploying the webhook service 07/29/23 15:48:21.102 -STEP: Verifying the service has paired with the endpoint 07/29/23 15:48:21.126 -Jul 29 15:48:22.127: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +STEP: Setting up server cert 08/24/23 11:43:14.332 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 11:43:15.395 +STEP: Deploying the webhook pod 08/24/23 11:43:15.432 +STEP: Wait for the deployment to be ready 08/24/23 11:43:15.486 +Aug 24 11:43:15.502: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +STEP: Deploying the webhook service 08/24/23 11:43:17.521 +STEP: Verifying the service has paired with the endpoint 08/24/23 11:43:17.545 +Aug 24 11:43:18.546: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 [It] should be able to deny attaching pod [Conformance] test/e2e/apimachinery/webhook.go:209 -STEP: Registering the webhook via the AdmissionRegistration API 07/29/23 15:48:22.136 -STEP: create a pod 07/29/23 15:48:22.172 -Jul 29 15:48:22.187: INFO: Waiting up to 5m0s for pod "to-be-attached-pod" in namespace "webhook-6661" to be "running" -Jul 29 15:48:22.212: INFO: Pod "to-be-attached-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 24.56373ms -Jul 29 15:48:24.218: INFO: Pod "to-be-attached-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.030817644s -Jul 29 15:48:24.218: INFO: Pod "to-be-attached-pod" satisfied condition "running" -STEP: 'kubectl attach' the pod, should be denied by the webhook 07/29/23 15:48:24.218 -Jul 29 15:48:24.219: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=webhook-6661 attach --namespace=webhook-6661 to-be-attached-pod -i -c=container1' -Jul 29 15:48:24.410: INFO: rc: 1 +STEP: Registering the webhook via the AdmissionRegistration API 08/24/23 11:43:18.553 +STEP: create a pod 08/24/23 11:43:18.588 +Aug 24 11:43:18.605: INFO: Waiting up to 5m0s for pod "to-be-attached-pod" in namespace "webhook-2697" to be "running" +Aug 24 11:43:18.611: INFO: Pod "to-be-attached-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.160101ms +Aug 24 11:43:20.618: INFO: Pod "to-be-attached-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.013378871s +Aug 24 11:43:20.618: INFO: Pod "to-be-attached-pod" satisfied condition "running" +STEP: 'kubectl attach' the pod, should be denied by the webhook 08/24/23 11:43:20.619 +Aug 24 11:43:20.619: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=webhook-2697 attach --namespace=webhook-2697 to-be-attached-pod -i -c=container1' +Aug 24 11:43:20.777: INFO: rc: 1 [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 15:48:24.425: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 11:43:20.788: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:105 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -2622,10 +2103,10 @@ Jul 29 15:48:24.425: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-6661" for this suite. 07/29/23 15:48:24.513 -STEP: Destroying namespace "webhook-6661-markers" for this suite. 07/29/23 15:48:24.569 +STEP: Destroying namespace "webhook-2697" for this suite. 08/24/23 11:43:20.876 +STEP: Destroying namespace "webhook-2697-markers" for this suite. 
08/24/23 11:43:20.91 ------------------------------ -• [SLOW TEST] [6.359 seconds] +• [SLOW TEST] [6.794 seconds] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/framework.go:23 should be able to deny attaching pod [Conformance] @@ -2634,37 +2115,37 @@ test/e2e/apimachinery/framework.go:23 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:48:18.239 - Jul 29 15:48:18.239: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 15:48:18.243 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:48:18.269 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:48:18.279 + STEP: Creating a kubernetes client 08/24/23 11:43:14.179 + Aug 24 11:43:14.180: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 11:43:14.19 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:43:14.268 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:43:14.281 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 15:48:18.321 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 15:48:19.006 - STEP: Deploying the webhook pod 07/29/23 15:48:19.018 - STEP: Wait for the deployment to be ready 07/29/23 15:48:19.039 - Jul 29 15:48:19.068: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set - STEP: Deploying the webhook service 07/29/23 15:48:21.102 - STEP: Verifying the service has paired with the endpoint 07/29/23 15:48:21.126 - Jul 29 15:48:22.127: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + STEP: Setting up server cert 08/24/23 11:43:14.332 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 11:43:15.395 + STEP: Deploying the webhook pod 08/24/23 11:43:15.432 + STEP: Wait for the deployment to be ready 08/24/23 11:43:15.486 + Aug 24 11:43:15.502: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created + STEP: Deploying the webhook service 08/24/23 11:43:17.521 + STEP: Verifying the service has paired with the endpoint 08/24/23 11:43:17.545 + Aug 24 11:43:18.546: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 [It] should be able to deny attaching pod [Conformance] test/e2e/apimachinery/webhook.go:209 - STEP: Registering the webhook via the AdmissionRegistration API 07/29/23 15:48:22.136 - STEP: create a pod 07/29/23 15:48:22.172 - Jul 29 15:48:22.187: INFO: Waiting up to 5m0s for pod "to-be-attached-pod" in namespace "webhook-6661" to be "running" - Jul 29 15:48:22.212: INFO: Pod "to-be-attached-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 24.56373ms - Jul 29 15:48:24.218: INFO: Pod "to-be-attached-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.030817644s - Jul 29 15:48:24.218: INFO: Pod "to-be-attached-pod" satisfied condition "running" - STEP: 'kubectl attach' the pod, should be denied by the webhook 07/29/23 15:48:24.218 - Jul 29 15:48:24.219: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=webhook-6661 attach --namespace=webhook-6661 to-be-attached-pod -i -c=container1' - Jul 29 15:48:24.410: INFO: rc: 1 + STEP: Registering the webhook via the AdmissionRegistration API 08/24/23 11:43:18.553 + STEP: create a pod 08/24/23 11:43:18.588 + Aug 24 11:43:18.605: INFO: Waiting up to 5m0s for pod "to-be-attached-pod" in namespace "webhook-2697" to be "running" + Aug 24 11:43:18.611: INFO: Pod "to-be-attached-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.160101ms + Aug 24 11:43:20.618: INFO: Pod "to-be-attached-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.013378871s + Aug 24 11:43:20.618: INFO: Pod "to-be-attached-pod" satisfied condition "running" + STEP: 'kubectl attach' the pod, should be denied by the webhook 08/24/23 11:43:20.619 + Aug 24 11:43:20.619: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=webhook-2697 attach --namespace=webhook-2697 to-be-attached-pod -i -c=container1' + Aug 24 11:43:20.777: INFO: rc: 1 [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 15:48:24.425: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 11:43:20.788: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:105 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -2673,2977 +2154,3645 @@ test/e2e/apimachinery/framework.go:23 dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-6661" for this suite. 07/29/23 15:48:24.513 - STEP: Destroying namespace "webhook-6661-markers" for this suite. 07/29/23 15:48:24.569 + STEP: Destroying namespace "webhook-2697" for this suite. 08/24/23 11:43:20.876 + STEP: Destroying namespace "webhook-2697-markers" for this suite. 
08/24/23 11:43:20.91 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Lease - lease API should be available [Conformance] - test/e2e/common/node/lease.go:72 -[BeforeEach] [sig-node] Lease +[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch + watch on custom resource definition objects [Conformance] + test/e2e/apimachinery/crd_watch.go:51 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:48:24.614 -Jul 29 15:48:24.615: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename lease-test 07/29/23 15:48:24.62 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:48:24.653 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:48:24.666 -[BeforeEach] [sig-node] Lease +STEP: Creating a kubernetes client 08/24/23 11:43:20.975 +Aug 24 11:43:20.975: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-watch 08/24/23 11:43:20.98 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:43:21.022 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:43:21.027 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] lease API should be available [Conformance] - test/e2e/common/node/lease.go:72 -[AfterEach] [sig-node] Lease +[It] watch on custom resource definition objects [Conformance] + test/e2e/apimachinery/crd_watch.go:51 +Aug 24 11:43:21.034: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Creating first CR 08/24/23 11:43:23.752 +Aug 24 11:43:23.762: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:23Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:23Z]] name:name1 resourceVersion:5002 uid:c69e0bd2-4587-49e2-a1c6-c393762769af] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Creating second CR 08/24/23 11:43:33.763 +Aug 24 11:43:33.778: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:33Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:33Z]] name:name2 resourceVersion:5049 uid:5642a16e-f335-413d-8ece-db363cf93380] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Modifying first CR 08/24/23 11:43:43.779 +Aug 24 11:43:43.802: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:23Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test 
operation:Update time:2023-08-24T11:43:43Z]] name:name1 resourceVersion:5072 uid:c69e0bd2-4587-49e2-a1c6-c393762769af] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Modifying second CR 08/24/23 11:43:53.804 +Aug 24 11:43:53.816: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:33Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:53Z]] name:name2 resourceVersion:5095 uid:5642a16e-f335-413d-8ece-db363cf93380] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Deleting first CR 08/24/23 11:44:03.817 +Aug 24 11:44:03.833: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:23Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:43Z]] name:name1 resourceVersion:5118 uid:c69e0bd2-4587-49e2-a1c6-c393762769af] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Deleting second CR 08/24/23 11:44:13.834 +Aug 24 11:44:13.852: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:33Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:53Z]] name:name2 resourceVersion:5141 uid:5642a16e-f335-413d-8ece-db363cf93380] num:map[num1:9223372036854775807 num2:1000000]]} +[AfterEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 15:48:24.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Lease +Aug 24 11:44:24.384: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Lease +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Lease +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "lease-test-6011" for this suite. 07/29/23 15:48:24.797 +STEP: Destroying namespace "crd-watch-896" for this suite. 
08/24/23 11:44:24.396 ------------------------------ -• [0.200 seconds] -[sig-node] Lease -test/e2e/common/node/framework.go:23 - lease API should be available [Conformance] - test/e2e/common/node/lease.go:72 +• [SLOW TEST] [63.442 seconds] +[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + CustomResourceDefinition Watch + test/e2e/apimachinery/crd_watch.go:44 + watch on custom resource definition objects [Conformance] + test/e2e/apimachinery/crd_watch.go:51 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Lease + [BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:48:24.614 - Jul 29 15:48:24.615: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename lease-test 07/29/23 15:48:24.62 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:48:24.653 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:48:24.666 - [BeforeEach] [sig-node] Lease + STEP: Creating a kubernetes client 08/24/23 11:43:20.975 + Aug 24 11:43:20.975: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-watch 08/24/23 11:43:20.98 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:43:21.022 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:43:21.027 + [BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] lease API should be available [Conformance] - test/e2e/common/node/lease.go:72 - [AfterEach] [sig-node] Lease + [It] watch on custom resource definition objects [Conformance] + test/e2e/apimachinery/crd_watch.go:51 + Aug 24 11:43:21.034: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Creating first CR 08/24/23 11:43:23.752 + Aug 24 11:43:23.762: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:23Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:23Z]] name:name1 resourceVersion:5002 uid:c69e0bd2-4587-49e2-a1c6-c393762769af] num:map[num1:9223372036854775807 num2:1000000]]} + STEP: Creating second CR 08/24/23 11:43:33.763 + Aug 24 11:43:33.778: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:33Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:33Z]] name:name2 resourceVersion:5049 uid:5642a16e-f335-413d-8ece-db363cf93380] num:map[num1:9223372036854775807 num2:1000000]]} + STEP: Modifying first CR 08/24/23 11:43:43.779 + Aug 24 11:43:43.802: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:23Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 
fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:43Z]] name:name1 resourceVersion:5072 uid:c69e0bd2-4587-49e2-a1c6-c393762769af] num:map[num1:9223372036854775807 num2:1000000]]} + STEP: Modifying second CR 08/24/23 11:43:53.804 + Aug 24 11:43:53.816: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:33Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:53Z]] name:name2 resourceVersion:5095 uid:5642a16e-f335-413d-8ece-db363cf93380] num:map[num1:9223372036854775807 num2:1000000]]} + STEP: Deleting first CR 08/24/23 11:44:03.817 + Aug 24 11:44:03.833: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:23Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:43Z]] name:name1 resourceVersion:5118 uid:c69e0bd2-4587-49e2-a1c6-c393762769af] num:map[num1:9223372036854775807 num2:1000000]]} + STEP: Deleting second CR 08/24/23 11:44:13.834 + Aug 24 11:44:13.852: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-08-24T11:43:33Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-08-24T11:43:53Z]] name:name2 resourceVersion:5141 uid:5642a16e-f335-413d-8ece-db363cf93380] num:map[num1:9223372036854775807 num2:1000000]]} + [AfterEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 15:48:24.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Lease + Aug 24 11:44:24.384: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Lease + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Lease + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "lease-test-6011" for this suite. 07/29/23 15:48:24.797 + STEP: Destroying namespace "crd-watch-896" for this suite. 08/24/23 11:44:24.396 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and ensure its status is promptly calculated. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:75 -[BeforeEach] [sig-api-machinery] ResourceQuota +[sig-node] NoExecuteTaintManager Single Pod [Serial] + removing taint cancels eviction [Disruptive] [Conformance] + test/e2e/node/taints.go:293 +[BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:48:24.841 -Jul 29 15:48:24.842: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 15:48:24.85 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:48:24.884 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:48:24.888 -[BeforeEach] [sig-api-machinery] ResourceQuota +STEP: Creating a kubernetes client 08/24/23 11:44:24.422 +Aug 24 11:44:24.422: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename taint-single-pod 08/24/23 11:44:24.426 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:44:24.48 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:44:24.488 +[BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] - test/e2e/apimachinery/resource_quota.go:75 -STEP: Counting existing ResourceQuota 07/29/23 15:48:24.893 -STEP: Creating a ResourceQuota 07/29/23 15:48:29.899 -STEP: Ensuring resource quota status is calculated 07/29/23 15:48:29.913 -[AfterEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] + test/e2e/node/taints.go:170 +Aug 24 11:44:24.495: INFO: Waiting up to 1m0s for all nodes to be ready +Aug 24 11:45:24.558: INFO: Waiting for terminating namespaces to be deleted... +[It] removing taint cancels eviction [Disruptive] [Conformance] + test/e2e/node/taints.go:293 +Aug 24 11:45:24.566: INFO: Starting informer... +STEP: Starting pod... 08/24/23 11:45:24.566 +Aug 24 11:45:24.800: INFO: Pod is running on pe9deep4seen-3. Tainting Node +STEP: Trying to apply a taint on the Node 08/24/23 11:45:24.8 +STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 08/24/23 11:45:24.824 +STEP: Waiting short time to make sure Pod is queued for deletion 08/24/23 11:45:24.835 +Aug 24 11:45:24.835: INFO: Pod wasn't evicted. Proceeding +Aug 24 11:45:24.835: INFO: Removing taint from Node +STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 08/24/23 11:45:24.87 +STEP: Waiting some time to make sure that toleration time passed. 08/24/23 11:45:24.889 +Aug 24 11:46:39.892: INFO: Pod wasn't evicted. 
Test successful +[AfterEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 15:48:31.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 11:46:39.893: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-3009" for this suite. 07/29/23 15:48:31.934 +STEP: Destroying namespace "taint-single-pod-7256" for this suite. 08/24/23 11:46:39.915 ------------------------------ -• [SLOW TEST] [7.107 seconds] -[sig-api-machinery] ResourceQuota -test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] - test/e2e/apimachinery/resource_quota.go:75 +• [SLOW TEST] [135.509 seconds] +[sig-node] NoExecuteTaintManager Single Pod [Serial] +test/e2e/node/framework.go:23 + removing taint cancels eviction [Disruptive] [Conformance] + test/e2e/node/taints.go:293 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:48:24.841 - Jul 29 15:48:24.842: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 15:48:24.85 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:48:24.884 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:48:24.888 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 11:44:24.422 + Aug 24 11:44:24.422: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename taint-single-pod 08/24/23 11:44:24.426 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:44:24.48 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:44:24.488 + [BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] - test/e2e/apimachinery/resource_quota.go:75 - STEP: Counting existing ResourceQuota 07/29/23 15:48:24.893 - STEP: Creating a ResourceQuota 07/29/23 15:48:29.899 - STEP: Ensuring resource quota status is calculated 07/29/23 15:48:29.913 - [AfterEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] + test/e2e/node/taints.go:170 + Aug 24 11:44:24.495: INFO: Waiting up to 1m0s for all nodes to be ready + Aug 24 11:45:24.558: INFO: Waiting for terminating namespaces to be deleted... + [It] removing taint cancels eviction [Disruptive] [Conformance] + test/e2e/node/taints.go:293 + Aug 24 11:45:24.566: INFO: Starting informer... + STEP: Starting pod... 08/24/23 11:45:24.566 + Aug 24 11:45:24.800: INFO: Pod is running on pe9deep4seen-3. 
Tainting Node + STEP: Trying to apply a taint on the Node 08/24/23 11:45:24.8 + STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 08/24/23 11:45:24.824 + STEP: Waiting short time to make sure Pod is queued for deletion 08/24/23 11:45:24.835 + Aug 24 11:45:24.835: INFO: Pod wasn't evicted. Proceeding + Aug 24 11:45:24.835: INFO: Removing taint from Node + STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 08/24/23 11:45:24.87 + STEP: Waiting some time to make sure that toleration time passed. 08/24/23 11:45:24.889 + Aug 24 11:46:39.892: INFO: Pod wasn't evicted. Test successful + [AfterEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 15:48:31.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 11:46:39.893: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-3009" for this suite. 07/29/23 15:48:31.934 + STEP: Destroying namespace "taint-single-pod-7256" for this suite. 08/24/23 11:46:39.915 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-network] DNS - should provide DNS for ExternalName services [Conformance] - test/e2e/network/dns.go:333 -[BeforeEach] [sig-network] DNS +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + removes definition from spec when one version gets changed to not be served [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:442 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:48:31.957 -Jul 29 15:48:31.957: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename dns 07/29/23 15:48:31.96 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:48:31.992 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:48:32.003 -[BeforeEach] [sig-network] DNS +STEP: Creating a kubernetes client 08/24/23 11:46:39.932 +Aug 24 11:46:39.932: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 11:46:39.937 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:39.966 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:39.97 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should provide DNS for ExternalName services [Conformance] - test/e2e/network/dns.go:333 -STEP: Creating a test externalName service 07/29/23 15:48:32.008 -STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local CNAME > 
/results/wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:48:32.018 -STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:48:32.018 -STEP: creating a pod to probe DNS 07/29/23 15:48:32.019 -STEP: submitting the pod to kubernetes 07/29/23 15:48:32.019 -Jul 29 15:48:32.044: INFO: Waiting up to 15m0s for pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e" in namespace "dns-9145" to be "running" -Jul 29 15:48:32.051: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 7.03123ms -Jul 29 15:48:34.066: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021796813s -Jul 29 15:48:36.059: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 4.014194648s -Jul 29 15:48:38.062: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 6.01752449s -Jul 29 15:48:40.063: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 8.018587063s -Jul 29 15:48:42.059: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 10.014797297s -Jul 29 15:48:44.060: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 12.015568639s -Jul 29 15:48:46.063: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 14.018369457s -Jul 29 15:48:48.059: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 16.014724125s -Jul 29 15:48:50.061: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 18.016970255s -Jul 29 15:48:52.066: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 20.021625151s -Jul 29 15:48:54.060: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 22.015671937s -Jul 29 15:48:56.063: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Running", Reason="", readiness=true. 
Elapsed: 24.018461144s -Jul 29 15:48:56.063: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e" satisfied condition "running" -STEP: retrieving the pod 07/29/23 15:48:56.063 -STEP: looking for the results for each expected name from probers 07/29/23 15:48:56.073 -Jul 29 15:48:56.095: INFO: DNS probes using dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e succeeded - -STEP: deleting the pod 07/29/23 15:48:56.095 -STEP: changing the externalName to bar.example.com 07/29/23 15:48:56.133 -STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:48:56.178 -STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:48:56.179 -STEP: creating a second pod to probe DNS 07/29/23 15:48:56.179 -STEP: submitting the pod to kubernetes 07/29/23 15:48:56.179 -Jul 29 15:48:56.206: INFO: Waiting up to 15m0s for pod "dns-test-77546f80-18f7-46a7-8716-659d561fa9bc" in namespace "dns-9145" to be "running" -Jul 29 15:48:56.215: INFO: Pod "dns-test-77546f80-18f7-46a7-8716-659d561fa9bc": Phase="Pending", Reason="", readiness=false. Elapsed: 9.116088ms -Jul 29 15:48:58.228: INFO: Pod "dns-test-77546f80-18f7-46a7-8716-659d561fa9bc": Phase="Running", Reason="", readiness=true. Elapsed: 2.02201442s -Jul 29 15:48:58.228: INFO: Pod "dns-test-77546f80-18f7-46a7-8716-659d561fa9bc" satisfied condition "running" -STEP: retrieving the pod 07/29/23 15:48:58.228 -STEP: looking for the results for each expected name from probers 07/29/23 15:48:58.238 -Jul 29 15:48:58.252: INFO: File wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local from pod dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc contains 'foo.example.com. -' instead of 'bar.example.com.' -Jul 29 15:48:58.261: INFO: File jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local from pod dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc contains 'foo.example.com. -' instead of 'bar.example.com.' -Jul 29 15:48:58.261: INFO: Lookups using dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc failed for: [wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local] - -Jul 29 15:49:03.273: INFO: File wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local from pod dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc contains 'foo.example.com. -' instead of 'bar.example.com.' -Jul 29 15:49:03.281: INFO: File jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local from pod dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc contains 'foo.example.com. -' instead of 'bar.example.com.' 
-Jul 29 15:49:03.281: INFO: Lookups using dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc failed for: [wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local] - -Jul 29 15:49:08.276: INFO: DNS probes using dns-test-77546f80-18f7-46a7-8716-659d561fa9bc succeeded - -STEP: deleting the pod 07/29/23 15:49:08.277 -STEP: changing the service to type=ClusterIP 07/29/23 15:49:08.309 -STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local A > /results/wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:49:08.357 -STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local A > /results/jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:49:08.358 -STEP: creating a third pod to probe DNS 07/29/23 15:49:08.358 -STEP: submitting the pod to kubernetes 07/29/23 15:49:08.367 -Jul 29 15:49:08.391: INFO: Waiting up to 15m0s for pod "dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9" in namespace "dns-9145" to be "running" -Jul 29 15:49:08.402: INFO: Pod "dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9": Phase="Pending", Reason="", readiness=false. Elapsed: 11.495302ms -Jul 29 15:49:10.410: INFO: Pod "dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9": Phase="Running", Reason="", readiness=true. Elapsed: 2.01965564s -Jul 29 15:49:10.410: INFO: Pod "dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9" satisfied condition "running" -STEP: retrieving the pod 07/29/23 15:49:10.41 -STEP: looking for the results for each expected name from probers 07/29/23 15:49:10.416 -Jul 29 15:49:10.430: INFO: DNS probes using dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9 succeeded - -STEP: deleting the pod 07/29/23 15:49:10.43 -STEP: deleting the test externalName service 07/29/23 15:49:10.447 -[AfterEach] [sig-network] DNS +[It] removes definition from spec when one version gets changed to not be served [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:442 +STEP: set up a multi version CRD 08/24/23 11:46:39.975 +Aug 24 11:46:39.976: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: mark a version not serverd 08/24/23 11:46:45.731 +STEP: check the unserved version gets removed 08/24/23 11:46:45.769 +STEP: check the other version is not changed 08/24/23 11:46:48.242 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 15:49:10.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] DNS +Aug 24 11:46:52.701: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "dns-9145" for this suite. 07/29/23 15:49:10.494 +STEP: Destroying namespace "crd-publish-openapi-754" for this suite. 
08/24/23 11:46:52.723 ------------------------------ -• [SLOW TEST] [38.555 seconds] -[sig-network] DNS -test/e2e/network/common/framework.go:23 - should provide DNS for ExternalName services [Conformance] - test/e2e/network/dns.go:333 +• [SLOW TEST] [12.808 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + removes definition from spec when one version gets changed to not be served [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:442 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] DNS + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:48:31.957 - Jul 29 15:48:31.957: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename dns 07/29/23 15:48:31.96 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:48:31.992 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:48:32.003 - [BeforeEach] [sig-network] DNS + STEP: Creating a kubernetes client 08/24/23 11:46:39.932 + Aug 24 11:46:39.932: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 11:46:39.937 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:39.966 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:39.97 + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should provide DNS for ExternalName services [Conformance] - test/e2e/network/dns.go:333 - STEP: Creating a test externalName service 07/29/23 15:48:32.008 - STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:48:32.018 - STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:48:32.018 - STEP: creating a pod to probe DNS 07/29/23 15:48:32.019 - STEP: submitting the pod to kubernetes 07/29/23 15:48:32.019 - Jul 29 15:48:32.044: INFO: Waiting up to 15m0s for pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e" in namespace "dns-9145" to be "running" - Jul 29 15:48:32.051: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 7.03123ms - Jul 29 15:48:34.066: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021796813s - Jul 29 15:48:36.059: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 4.014194648s - Jul 29 15:48:38.062: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 6.01752449s - Jul 29 15:48:40.063: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 8.018587063s - Jul 29 15:48:42.059: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. 
Elapsed: 10.014797297s - Jul 29 15:48:44.060: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 12.015568639s - Jul 29 15:48:46.063: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 14.018369457s - Jul 29 15:48:48.059: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 16.014724125s - Jul 29 15:48:50.061: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 18.016970255s - Jul 29 15:48:52.066: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 20.021625151s - Jul 29 15:48:54.060: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Pending", Reason="", readiness=false. Elapsed: 22.015671937s - Jul 29 15:48:56.063: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e": Phase="Running", Reason="", readiness=true. Elapsed: 24.018461144s - Jul 29 15:48:56.063: INFO: Pod "dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e" satisfied condition "running" - STEP: retrieving the pod 07/29/23 15:48:56.063 - STEP: looking for the results for each expected name from probers 07/29/23 15:48:56.073 - Jul 29 15:48:56.095: INFO: DNS probes using dns-test-abd0cc72-5fc8-44bc-a98d-a5b4f66c5d6e succeeded - - STEP: deleting the pod 07/29/23 15:48:56.095 - STEP: changing the externalName to bar.example.com 07/29/23 15:48:56.133 - STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:48:56.178 - STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:48:56.179 - STEP: creating a second pod to probe DNS 07/29/23 15:48:56.179 - STEP: submitting the pod to kubernetes 07/29/23 15:48:56.179 - Jul 29 15:48:56.206: INFO: Waiting up to 15m0s for pod "dns-test-77546f80-18f7-46a7-8716-659d561fa9bc" in namespace "dns-9145" to be "running" - Jul 29 15:48:56.215: INFO: Pod "dns-test-77546f80-18f7-46a7-8716-659d561fa9bc": Phase="Pending", Reason="", readiness=false. Elapsed: 9.116088ms - Jul 29 15:48:58.228: INFO: Pod "dns-test-77546f80-18f7-46a7-8716-659d561fa9bc": Phase="Running", Reason="", readiness=true. Elapsed: 2.02201442s - Jul 29 15:48:58.228: INFO: Pod "dns-test-77546f80-18f7-46a7-8716-659d561fa9bc" satisfied condition "running" - STEP: retrieving the pod 07/29/23 15:48:58.228 - STEP: looking for the results for each expected name from probers 07/29/23 15:48:58.238 - Jul 29 15:48:58.252: INFO: File wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local from pod dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc contains 'foo.example.com. - ' instead of 'bar.example.com.' - Jul 29 15:48:58.261: INFO: File jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local from pod dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc contains 'foo.example.com. - ' instead of 'bar.example.com.' 
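
Annotation (not part of the captured log): the failed-then-succeeded CNAME probes in this removed run follow from how ExternalName works — the Service publishes a DNS CNAME to spec.externalName instead of a ClusterIP, so changing externalName from foo.example.com to bar.example.com eventually changes what dig returns. A minimal client-go sketch of the object under test, reusing the service name and namespace from the log and this run's kubeconfig path:

    package main

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig-3177299396")
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        // An ExternalName Service is pure DNS: no ClusterIP, just a CNAME record
        // pointing at spec.externalName, which the wheezy/jessie probes dig for.
        svc := &corev1.Service{
            ObjectMeta: metav1.ObjectMeta{Name: "dns-test-service-3"},
            Spec: corev1.ServiceSpec{
                Type:         corev1.ServiceTypeExternalName,
                ExternalName: "foo.example.com",
            },
        }
        _, err = client.CoreV1().Services("dns-9145").Create(context.TODO(), svc, metav1.CreateOptions{})
        if err != nil {
            panic(err)
        }
    }
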
- Jul 29 15:48:58.261: INFO: Lookups using dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc failed for: [wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local] - - Jul 29 15:49:03.273: INFO: File wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local from pod dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc contains 'foo.example.com. - ' instead of 'bar.example.com.' - Jul 29 15:49:03.281: INFO: File jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local from pod dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc contains 'foo.example.com. - ' instead of 'bar.example.com.' - Jul 29 15:49:03.281: INFO: Lookups using dns-9145/dns-test-77546f80-18f7-46a7-8716-659d561fa9bc failed for: [wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local] - - Jul 29 15:49:08.276: INFO: DNS probes using dns-test-77546f80-18f7-46a7-8716-659d561fa9bc succeeded - - STEP: deleting the pod 07/29/23 15:49:08.277 - STEP: changing the service to type=ClusterIP 07/29/23 15:49:08.309 - STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local A > /results/wheezy_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:49:08.357 - STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-9145.svc.cluster.local A > /results/jessie_udp@dns-test-service-3.dns-9145.svc.cluster.local; sleep 1; done - 07/29/23 15:49:08.358 - STEP: creating a third pod to probe DNS 07/29/23 15:49:08.358 - STEP: submitting the pod to kubernetes 07/29/23 15:49:08.367 - Jul 29 15:49:08.391: INFO: Waiting up to 15m0s for pod "dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9" in namespace "dns-9145" to be "running" - Jul 29 15:49:08.402: INFO: Pod "dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9": Phase="Pending", Reason="", readiness=false. Elapsed: 11.495302ms - Jul 29 15:49:10.410: INFO: Pod "dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01965564s - Jul 29 15:49:10.410: INFO: Pod "dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9" satisfied condition "running" - STEP: retrieving the pod 07/29/23 15:49:10.41 - STEP: looking for the results for each expected name from probers 07/29/23 15:49:10.416 - Jul 29 15:49:10.430: INFO: DNS probes using dns-test-e1cd7fc1-288e-406f-afe2-f46f43e656c9 succeeded - - STEP: deleting the pod 07/29/23 15:49:10.43 - STEP: deleting the test externalName service 07/29/23 15:49:10.447 - [AfterEach] [sig-network] DNS + [It] removes definition from spec when one version gets changed to not be served [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:442 + STEP: set up a multi version CRD 08/24/23 11:46:39.975 + Aug 24 11:46:39.976: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: mark a version not serverd 08/24/23 11:46:45.731 + STEP: check the unserved version gets removed 08/24/23 11:46:45.769 + STEP: check the other version is not changed 08/24/23 11:46:48.242 + [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 15:49:10.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] DNS + Aug 24 11:46:52.701: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] DNS + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] DNS + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "dns-9145" for this suite. 07/29/23 15:49:10.494 + STEP: Destroying namespace "crd-publish-openapi-754" for this suite. 
08/24/23 11:46:52.723 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPreemption [Serial] - validates lower priority pod preemption by critical pod [Conformance] - test/e2e/scheduling/preemption.go:224 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] +[sig-api-machinery] Garbage collector + should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] + test/e2e/apimachinery/garbage_collector.go:550 +[BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:49:10.519 -Jul 29 15:49:10.520: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sched-preemption 07/29/23 15:49:10.528 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:49:10.557 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:49:10.562 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] +STEP: Creating a kubernetes client 08/24/23 11:46:52.75 +Aug 24 11:46:52.750: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename gc 08/24/23 11:46:52.753 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:52.795 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:52.8 +[BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:97 -Jul 29 15:49:10.597: INFO: Waiting up to 1m0s for all nodes to be ready -Jul 29 15:50:10.669: INFO: Waiting for terminating namespaces to be deleted... -[It] validates lower priority pod preemption by critical pod [Conformance] - test/e2e/scheduling/preemption.go:224 -STEP: Create pods that use 4/5 of node resources. 07/29/23 15:50:10.681 -Jul 29 15:50:10.733: INFO: Created pod: pod0-0-sched-preemption-low-priority -Jul 29 15:50:10.758: INFO: Created pod: pod0-1-sched-preemption-medium-priority -Jul 29 15:50:10.868: INFO: Created pod: pod1-0-sched-preemption-medium-priority -Jul 29 15:50:10.891: INFO: Created pod: pod1-1-sched-preemption-medium-priority -Jul 29 15:50:10.978: INFO: Created pod: pod2-0-sched-preemption-medium-priority -Jul 29 15:50:11.015: INFO: Created pod: pod2-1-sched-preemption-medium-priority -STEP: Wait for pods to be scheduled. 07/29/23 15:50:11.016 -Jul 29 15:50:11.021: INFO: Waiting up to 5m0s for pod "pod0-0-sched-preemption-low-priority" in namespace "sched-preemption-7590" to be "running" -Jul 29 15:50:11.045: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 24.042994ms -Jul 29 15:50:13.054: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Running", Reason="", readiness=true. Elapsed: 2.033296041s -Jul 29 15:50:13.054: INFO: Pod "pod0-0-sched-preemption-low-priority" satisfied condition "running" -Jul 29 15:50:13.054: INFO: Waiting up to 5m0s for pod "pod0-1-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" -Jul 29 15:50:13.060: INFO: Pod "pod0-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. 
Elapsed: 5.981588ms -Jul 29 15:50:13.061: INFO: Pod "pod0-1-sched-preemption-medium-priority" satisfied condition "running" -Jul 29 15:50:13.061: INFO: Waiting up to 5m0s for pod "pod1-0-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" -Jul 29 15:50:13.067: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 6.181387ms -Jul 29 15:50:15.075: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 2.014003934s -Jul 29 15:50:15.075: INFO: Pod "pod1-0-sched-preemption-medium-priority" satisfied condition "running" -Jul 29 15:50:15.075: INFO: Waiting up to 5m0s for pod "pod1-1-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" -Jul 29 15:50:15.081: INFO: Pod "pod1-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.806277ms -Jul 29 15:50:15.081: INFO: Pod "pod1-1-sched-preemption-medium-priority" satisfied condition "running" -Jul 29 15:50:15.082: INFO: Waiting up to 5m0s for pod "pod2-0-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" -Jul 29 15:50:15.090: INFO: Pod "pod2-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 8.109988ms -Jul 29 15:50:15.090: INFO: Pod "pod2-0-sched-preemption-medium-priority" satisfied condition "running" -Jul 29 15:50:15.090: INFO: Waiting up to 5m0s for pod "pod2-1-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" -Jul 29 15:50:15.096: INFO: Pod "pod2-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 6.286333ms -Jul 29 15:50:15.096: INFO: Pod "pod2-1-sched-preemption-medium-priority" satisfied condition "running" -STEP: Run a critical pod that use same resources as that of a lower priority pod 07/29/23 15:50:15.097 -Jul 29 15:50:15.113: INFO: Waiting up to 2m0s for pod "critical-pod" in namespace "kube-system" to be "running" -Jul 29 15:50:15.119: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.060695ms -Jul 29 15:50:17.128: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014628569s -Jul 29 15:50:19.130: INFO: Pod "critical-pod": Phase="Running", Reason="", readiness=true. Elapsed: 4.016583744s -Jul 29 15:50:19.130: INFO: Pod "critical-pod" satisfied condition "running" -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] +[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] + test/e2e/apimachinery/garbage_collector.go:550 +STEP: create the deployment 08/24/23 11:46:52.805 +STEP: Wait for the Deployment to create new ReplicaSet 08/24/23 11:46:52.816 +STEP: delete the deployment 08/24/23 11:46:53.336 +STEP: wait for deployment deletion to see if the garbage collector mistakenly deletes the rs 08/24/23 11:46:53.357 +STEP: Gathering metrics 08/24/23 11:46:53.92 +Aug 24 11:46:53.973: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" +Aug 24 11:46:53.979: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 6.548052ms +Aug 24 11:46:53.980: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) +Aug 24 11:46:53.980: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" +Aug 24 11:46:54.208: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 -Jul 29 15:50:19.207: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:84 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] +Aug 24 11:46:54.208: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 -STEP: Destroying namespace "sched-preemption-7590" for this suite. 07/29/23 15:50:19.346 +STEP: Destroying namespace "gc-1344" for this suite. 
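
Annotation (not part of the captured log): the garbage-collector spec above hinges on deleteOptions.propagationPolicy. With Orphan, deleting the owning Deployment strips owner references instead of cascading, so the ReplicaSet must survive the delete — which is exactly what the "wait ... to see if the garbage collector mistakenly deletes the rs" step checks. A minimal sketch of that delete call; the deployment name is a placeholder, while namespace and kubeconfig are taken from this run:

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig-2729572383")
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        // Orphan propagation: the Deployment is removed, but the garbage collector
        // must leave the ReplicaSet it created behind.
        policy := metav1.DeletePropagationOrphan
        err = client.AppsV1().Deployments("gc-1344").Delete(
            context.TODO(), "example-deployment", metav1.DeleteOptions{PropagationPolicy: &policy})
        if err != nil {
            panic(err)
        }
    }
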
08/24/23 11:46:54.234 ------------------------------ -• [SLOW TEST] [68.851 seconds] -[sig-scheduling] SchedulerPreemption [Serial] -test/e2e/scheduling/framework.go:40 - validates lower priority pod preemption by critical pod [Conformance] - test/e2e/scheduling/preemption.go:224 +• [1.496 seconds] +[sig-api-machinery] Garbage collector +test/e2e/apimachinery/framework.go:23 + should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] + test/e2e/apimachinery/garbage_collector.go:550 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + [BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:49:10.519 - Jul 29 15:49:10.520: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sched-preemption 07/29/23 15:49:10.528 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:49:10.557 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:49:10.562 - [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + STEP: Creating a kubernetes client 08/24/23 11:46:52.75 + Aug 24 11:46:52.750: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename gc 08/24/23 11:46:52.753 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:52.795 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:52.8 + [BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:97 - Jul 29 15:49:10.597: INFO: Waiting up to 1m0s for all nodes to be ready - Jul 29 15:50:10.669: INFO: Waiting for terminating namespaces to be deleted... - [It] validates lower priority pod preemption by critical pod [Conformance] - test/e2e/scheduling/preemption.go:224 - STEP: Create pods that use 4/5 of node resources. 07/29/23 15:50:10.681 - Jul 29 15:50:10.733: INFO: Created pod: pod0-0-sched-preemption-low-priority - Jul 29 15:50:10.758: INFO: Created pod: pod0-1-sched-preemption-medium-priority - Jul 29 15:50:10.868: INFO: Created pod: pod1-0-sched-preemption-medium-priority - Jul 29 15:50:10.891: INFO: Created pod: pod1-1-sched-preemption-medium-priority - Jul 29 15:50:10.978: INFO: Created pod: pod2-0-sched-preemption-medium-priority - Jul 29 15:50:11.015: INFO: Created pod: pod2-1-sched-preemption-medium-priority - STEP: Wait for pods to be scheduled. 07/29/23 15:50:11.016 - Jul 29 15:50:11.021: INFO: Waiting up to 5m0s for pod "pod0-0-sched-preemption-low-priority" in namespace "sched-preemption-7590" to be "running" - Jul 29 15:50:11.045: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 24.042994ms - Jul 29 15:50:13.054: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Running", Reason="", readiness=true. Elapsed: 2.033296041s - Jul 29 15:50:13.054: INFO: Pod "pod0-0-sched-preemption-low-priority" satisfied condition "running" - Jul 29 15:50:13.054: INFO: Waiting up to 5m0s for pod "pod0-1-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" - Jul 29 15:50:13.060: INFO: Pod "pod0-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. 
Elapsed: 5.981588ms - Jul 29 15:50:13.061: INFO: Pod "pod0-1-sched-preemption-medium-priority" satisfied condition "running" - Jul 29 15:50:13.061: INFO: Waiting up to 5m0s for pod "pod1-0-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" - Jul 29 15:50:13.067: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 6.181387ms - Jul 29 15:50:15.075: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 2.014003934s - Jul 29 15:50:15.075: INFO: Pod "pod1-0-sched-preemption-medium-priority" satisfied condition "running" - Jul 29 15:50:15.075: INFO: Waiting up to 5m0s for pod "pod1-1-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" - Jul 29 15:50:15.081: INFO: Pod "pod1-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.806277ms - Jul 29 15:50:15.081: INFO: Pod "pod1-1-sched-preemption-medium-priority" satisfied condition "running" - Jul 29 15:50:15.082: INFO: Waiting up to 5m0s for pod "pod2-0-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" - Jul 29 15:50:15.090: INFO: Pod "pod2-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 8.109988ms - Jul 29 15:50:15.090: INFO: Pod "pod2-0-sched-preemption-medium-priority" satisfied condition "running" - Jul 29 15:50:15.090: INFO: Waiting up to 5m0s for pod "pod2-1-sched-preemption-medium-priority" in namespace "sched-preemption-7590" to be "running" - Jul 29 15:50:15.096: INFO: Pod "pod2-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 6.286333ms - Jul 29 15:50:15.096: INFO: Pod "pod2-1-sched-preemption-medium-priority" satisfied condition "running" - STEP: Run a critical pod that use same resources as that of a lower priority pod 07/29/23 15:50:15.097 - Jul 29 15:50:15.113: INFO: Waiting up to 2m0s for pod "critical-pod" in namespace "kube-system" to be "running" - Jul 29 15:50:15.119: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.060695ms - Jul 29 15:50:17.128: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014628569s - Jul 29 15:50:19.130: INFO: Pod "critical-pod": Phase="Running", Reason="", readiness=true. Elapsed: 4.016583744s - Jul 29 15:50:19.130: INFO: Pod "critical-pod" satisfied condition "running" - [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + [It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] + test/e2e/apimachinery/garbage_collector.go:550 + STEP: create the deployment 08/24/23 11:46:52.805 + STEP: Wait for the Deployment to create new ReplicaSet 08/24/23 11:46:52.816 + STEP: delete the deployment 08/24/23 11:46:53.336 + STEP: wait for deployment deletion to see if the garbage collector mistakenly deletes the rs 08/24/23 11:46:53.357 + STEP: Gathering metrics 08/24/23 11:46:53.92 + Aug 24 11:46:53.973: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" + Aug 24 11:46:53.979: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 6.548052ms + Aug 24 11:46:53.980: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) + Aug 24 11:46:53.980: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" + Aug 24 11:46:54.208: INFO: For apiserver_request_total: + For apiserver_request_latency_seconds: + For apiserver_init_events_total: + For garbage_collector_attempt_to_delete_queue_latency: + For garbage_collector_attempt_to_delete_work_duration: + For garbage_collector_attempt_to_orphan_queue_latency: + For garbage_collector_attempt_to_orphan_work_duration: + For garbage_collector_dirty_processing_latency_microseconds: + For garbage_collector_event_processing_latency_microseconds: + For garbage_collector_graph_changes_queue_latency: + For garbage_collector_graph_changes_work_duration: + For garbage_collector_orphan_processing_latency_microseconds: + For namespace_queue_latency: + For namespace_queue_latency_sum: + For namespace_queue_latency_count: + For namespace_retries: + For namespace_work_duration: + For namespace_work_duration_sum: + For namespace_work_duration_count: + For function_duration_seconds: + For errors_total: + For evicted_pods_total: + + [AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 - Jul 29 15:50:19.207: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:84 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + Aug 24 11:46:54.208: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 - STEP: Destroying namespace "sched-preemption-7590" for this suite. 07/29/23 15:50:19.346 + STEP: Destroying namespace "gc-1344" for this suite. 
08/24/23 11:46:54.234 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] ReplicaSet - Replicaset should have a working scale subresource [Conformance] - test/e2e/apps/replica_set.go:143 -[BeforeEach] [sig-apps] ReplicaSet +[sig-node] Pods + should get a host IP [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:204 +[BeforeEach] [sig-node] Pods set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:50:19.372 -Jul 29 15:50:19.372: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replicaset 07/29/23 15:50:19.391 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:19.429 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:19.433 -[BeforeEach] [sig-apps] ReplicaSet +STEP: Creating a kubernetes client 08/24/23 11:46:54.258 +Aug 24 11:46:54.258: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 11:46:54.262 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:54.294 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:54.299 +[BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 -[It] Replicaset should have a working scale subresource [Conformance] - test/e2e/apps/replica_set.go:143 -STEP: Creating replica set "test-rs" that asks for more than the allowed pod quota 07/29/23 15:50:19.437 -Jul 29 15:50:19.453: INFO: Pod name sample-pod: Found 0 pods out of 1 -Jul 29 15:50:24.465: INFO: Pod name sample-pod: Found 1 pods out of 1 -STEP: ensuring each pod is running 07/29/23 15:50:24.465 -STEP: getting scale subresource 07/29/23 15:50:24.466 -STEP: updating a scale subresource 07/29/23 15:50:24.48 -STEP: verifying the replicaset Spec.Replicas was modified 07/29/23 15:50:24.496 -STEP: Patch a scale subresource 07/29/23 15:50:24.509 -[AfterEach] [sig-apps] ReplicaSet +[BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 +[It] should get a host IP [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:204 +STEP: creating pod 08/24/23 11:46:54.304 +Aug 24 11:46:54.317: INFO: Waiting up to 5m0s for pod "pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b" in namespace "pods-6749" to be "running and ready" +Aug 24 11:46:54.324: INFO: Pod "pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b": Phase="Pending", Reason="", readiness=false. Elapsed: 6.389582ms +Aug 24 11:46:54.324: INFO: The phase of Pod pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:46:56.331: INFO: Pod "pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.013816788s +Aug 24 11:46:56.331: INFO: The phase of Pod pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b is Running (Ready = true) +Aug 24 11:46:56.331: INFO: Pod "pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b" satisfied condition "running and ready" +Aug 24 11:46:56.346: INFO: Pod pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b has hostIP: 192.168.121.130 +[AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 -Jul 29 15:50:24.584: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] ReplicaSet +Aug 24 11:46:56.346: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] ReplicaSet +[DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] ReplicaSet +[DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 -STEP: Destroying namespace "replicaset-9128" for this suite. 07/29/23 15:50:24.597 +STEP: Destroying namespace "pods-6749" for this suite. 08/24/23 11:46:56.355 ------------------------------ -• [SLOW TEST] [5.250 seconds] -[sig-apps] ReplicaSet -test/e2e/apps/framework.go:23 - Replicaset should have a working scale subresource [Conformance] - test/e2e/apps/replica_set.go:143 +• [2.109 seconds] +[sig-node] Pods +test/e2e/common/node/framework.go:23 + should get a host IP [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:204 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] ReplicaSet + [BeforeEach] [sig-node] Pods set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:50:19.372 - Jul 29 15:50:19.372: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replicaset 07/29/23 15:50:19.391 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:19.429 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:19.433 - [BeforeEach] [sig-apps] ReplicaSet + STEP: Creating a kubernetes client 08/24/23 11:46:54.258 + Aug 24 11:46:54.258: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 11:46:54.262 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:54.294 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:54.299 + [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 - [It] Replicaset should have a working scale subresource [Conformance] - test/e2e/apps/replica_set.go:143 - STEP: Creating replica set "test-rs" that asks for more than the allowed pod quota 07/29/23 15:50:19.437 - Jul 29 15:50:19.453: INFO: Pod name sample-pod: Found 0 pods out of 1 - Jul 29 15:50:24.465: INFO: Pod name sample-pod: Found 1 pods out of 1 - STEP: ensuring each pod is running 07/29/23 15:50:24.465 - STEP: getting scale subresource 07/29/23 15:50:24.466 - STEP: updating a scale subresource 07/29/23 15:50:24.48 - STEP: verifying the replicaset Spec.Replicas was modified 07/29/23 15:50:24.496 - STEP: Patch a scale subresource 07/29/23 15:50:24.509 - [AfterEach] [sig-apps] ReplicaSet + [BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 + [It] should get a host IP [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:204 + STEP: creating pod 08/24/23 11:46:54.304 + Aug 24 11:46:54.317: INFO: Waiting up to 5m0s for pod 
"pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b" in namespace "pods-6749" to be "running and ready" + Aug 24 11:46:54.324: INFO: Pod "pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b": Phase="Pending", Reason="", readiness=false. Elapsed: 6.389582ms + Aug 24 11:46:54.324: INFO: The phase of Pod pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:46:56.331: INFO: Pod "pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b": Phase="Running", Reason="", readiness=true. Elapsed: 2.013816788s + Aug 24 11:46:56.331: INFO: The phase of Pod pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b is Running (Ready = true) + Aug 24 11:46:56.331: INFO: Pod "pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b" satisfied condition "running and ready" + Aug 24 11:46:56.346: INFO: Pod pod-hostip-db059b64-d36f-47d3-8532-4d70641d761b has hostIP: 192.168.121.130 + [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 - Jul 29 15:50:24.584: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] ReplicaSet + Aug 24 11:46:56.346: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] ReplicaSet + [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] ReplicaSet + [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 - STEP: Destroying namespace "replicaset-9128" for this suite. 07/29/23 15:50:24.597 + STEP: Destroying namespace "pods-6749" for this suite. 08/24/23 11:46:56.355 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for CRD without validation schema [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:153 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[sig-auth] ServiceAccounts + should update a ServiceAccount [Conformance] + test/e2e/auth/service_accounts.go:810 +[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:50:24.622 -Jul 29 15:50:24.622: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 15:50:24.629 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:24.672 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:24.679 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 11:46:56.367 +Aug 24 11:46:56.367: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename svcaccounts 08/24/23 11:46:56.369 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:56.394 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:56.399 +[BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 -[It] works for CRD without validation schema [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:153 -Jul 29 15:50:24.689: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 07/29/23 15:50:28.126 -Jul 29 15:50:28.127: 
INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 --namespace=crd-publish-openapi-4373 create -f -' -Jul 29 15:50:29.751: INFO: stderr: "" -Jul 29 15:50:29.751: INFO: stdout: "e2e-test-crd-publish-openapi-4763-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" -Jul 29 15:50:29.751: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 --namespace=crd-publish-openapi-4373 delete e2e-test-crd-publish-openapi-4763-crds test-cr' -Jul 29 15:50:29.974: INFO: stderr: "" -Jul 29 15:50:29.974: INFO: stdout: "e2e-test-crd-publish-openapi-4763-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" -Jul 29 15:50:29.975: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 --namespace=crd-publish-openapi-4373 apply -f -' -Jul 29 15:50:30.566: INFO: stderr: "" -Jul 29 15:50:30.566: INFO: stdout: "e2e-test-crd-publish-openapi-4763-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" -Jul 29 15:50:30.568: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 --namespace=crd-publish-openapi-4373 delete e2e-test-crd-publish-openapi-4763-crds test-cr' -Jul 29 15:50:30.760: INFO: stderr: "" -Jul 29 15:50:30.760: INFO: stdout: "e2e-test-crd-publish-openapi-4763-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" -STEP: kubectl explain works to explain CR without validation schema 07/29/23 15:50:30.76 -Jul 29 15:50:30.764: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 explain e2e-test-crd-publish-openapi-4763-crds' -Jul 29 15:50:31.843: INFO: stderr: "" -Jul 29 15:50:31.843: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4763-crd\nVERSION: crd-publish-openapi-test-empty.example.com/v1\n\nDESCRIPTION:\n \n" -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[It] should update a ServiceAccount [Conformance] + test/e2e/auth/service_accounts.go:810 +STEP: Creating ServiceAccount "e2e-sa-5zjtq" 08/24/23 11:46:56.404 +Aug 24 11:46:56.414: INFO: AutomountServiceAccountToken: false +STEP: Updating ServiceAccount "e2e-sa-5zjtq" 08/24/23 11:46:56.414 +Aug 24 11:46:56.426: INFO: AutomountServiceAccountToken: true +[AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 -Jul 29 15:50:34.337: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +Aug 24 11:46:56.427: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 -STEP: Destroying namespace "crd-publish-openapi-4373" for this suite. 07/29/23 15:50:34.356 +STEP: Destroying namespace "svcaccounts-7286" for this suite. 
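
Annotation (not part of the captured log): the ServiceAccount spec above is a plain read-modify-write — it creates e2e-sa-5zjtq with automountServiceAccountToken=false and updates it to true, matching the two INFO lines. A minimal sketch, with the name and namespace taken from the log:

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig-2729572383")
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        sas := client.CoreV1().ServiceAccounts("svcaccounts-7286")
        sa, err := sas.Get(context.TODO(), "e2e-sa-5zjtq", metav1.GetOptions{})
        if err != nil {
            panic(err)
        }
        // Flip the automount flag from false to true, as the spec's update step does.
        automount := true
        sa.AutomountServiceAccountToken = &automount
        if _, err := sas.Update(context.TODO(), sa, metav1.UpdateOptions{}); err != nil {
            panic(err)
        }
    }
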
08/24/23 11:46:56.435 ------------------------------ -• [SLOW TEST] [9.745 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - works for CRD without validation schema [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:153 +• [0.088 seconds] +[sig-auth] ServiceAccounts +test/e2e/auth/framework.go:23 + should update a ServiceAccount [Conformance] + test/e2e/auth/service_accounts.go:810 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:50:24.622 - Jul 29 15:50:24.622: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 15:50:24.629 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:24.672 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:24.679 - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 11:46:56.367 + Aug 24 11:46:56.367: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename svcaccounts 08/24/23 11:46:56.369 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:56.394 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:56.399 + [BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 - [It] works for CRD without validation schema [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:153 - Jul 29 15:50:24.689: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 07/29/23 15:50:28.126 - Jul 29 15:50:28.127: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 --namespace=crd-publish-openapi-4373 create -f -' - Jul 29 15:50:29.751: INFO: stderr: "" - Jul 29 15:50:29.751: INFO: stdout: "e2e-test-crd-publish-openapi-4763-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" - Jul 29 15:50:29.751: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 --namespace=crd-publish-openapi-4373 delete e2e-test-crd-publish-openapi-4763-crds test-cr' - Jul 29 15:50:29.974: INFO: stderr: "" - Jul 29 15:50:29.974: INFO: stdout: "e2e-test-crd-publish-openapi-4763-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" - Jul 29 15:50:29.975: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 --namespace=crd-publish-openapi-4373 apply -f -' - Jul 29 15:50:30.566: INFO: stderr: "" - Jul 29 15:50:30.566: INFO: stdout: "e2e-test-crd-publish-openapi-4763-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" - Jul 29 15:50:30.568: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 --namespace=crd-publish-openapi-4373 delete e2e-test-crd-publish-openapi-4763-crds test-cr' - Jul 29 15:50:30.760: INFO: stderr: "" - Jul 29 15:50:30.760: INFO: stdout: "e2e-test-crd-publish-openapi-4763-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" - STEP: kubectl explain 
works to explain CR without validation schema 07/29/23 15:50:30.76 - Jul 29 15:50:30.764: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-4373 explain e2e-test-crd-publish-openapi-4763-crds' - Jul 29 15:50:31.843: INFO: stderr: "" - Jul 29 15:50:31.843: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4763-crd\nVERSION: crd-publish-openapi-test-empty.example.com/v1\n\nDESCRIPTION:\n \n" - [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [It] should update a ServiceAccount [Conformance] + test/e2e/auth/service_accounts.go:810 + STEP: Creating ServiceAccount "e2e-sa-5zjtq" 08/24/23 11:46:56.404 + Aug 24 11:46:56.414: INFO: AutomountServiceAccountToken: false + STEP: Updating ServiceAccount "e2e-sa-5zjtq" 08/24/23 11:46:56.414 + Aug 24 11:46:56.426: INFO: AutomountServiceAccountToken: true + [AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 - Jul 29 15:50:34.337: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + Aug 24 11:46:56.427: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 - STEP: Destroying namespace "crd-publish-openapi-4373" for this suite. 07/29/23 15:50:34.356 + STEP: Destroying namespace "svcaccounts-7286" for this suite. 
08/24/23 11:46:56.435 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] DNS - should provide DNS for pods for Hostname [Conformance] - test/e2e/network/dns.go:248 -[BeforeEach] [sig-network] DNS +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate custom resource with different stored version [Conformance] + test/e2e/apimachinery/webhook.go:323 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:50:34.369 -Jul 29 15:50:34.369: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename dns 07/29/23 15:50:34.372 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:34.407 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:34.414 -[BeforeEach] [sig-network] DNS +STEP: Creating a kubernetes client 08/24/23 11:46:56.464 +Aug 24 11:46:56.464: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 11:46:56.467 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:56.49 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:56.493 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should provide DNS for pods for Hostname [Conformance] - test/e2e/network/dns.go:248 -STEP: Creating a test headless service 07/29/23 15:50:34.42 -STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9936.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-2.dns-test-service-2.dns-9936.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/wheezy_hosts@dns-querier-2;sleep 1; done - 07/29/23 15:50:34.43 -STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9936.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-2.dns-test-service-2.dns-9936.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/jessie_hosts@dns-querier-2;sleep 1; done - 07/29/23 15:50:34.431 -STEP: creating a pod to probe DNS 07/29/23 15:50:34.433 -STEP: submitting the pod to kubernetes 07/29/23 15:50:34.433 -Jul 29 15:50:34.452: INFO: Waiting up to 15m0s for pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078" in namespace "dns-9936" to be "running" -Jul 29 15:50:34.461: INFO: Pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078": Phase="Pending", Reason="", readiness=false. Elapsed: 9.48012ms -Jul 29 15:50:36.470: INFO: Pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018700768s -Jul 29 15:50:38.470: INFO: Pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.017975169s -Jul 29 15:50:38.470: INFO: Pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078" satisfied condition "running" -STEP: retrieving the pod 07/29/23 15:50:38.47 -STEP: looking for the results for each expected name from probers 07/29/23 15:50:38.477 -Jul 29 15:50:38.505: INFO: DNS probes using dns-9936/dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078 succeeded - -STEP: deleting the pod 07/29/23 15:50:38.506 -STEP: deleting the test headless service 07/29/23 15:50:38.533 -[AfterEach] [sig-network] DNS +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 11:46:56.522 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 11:46:57.564 +STEP: Deploying the webhook pod 08/24/23 11:46:57.588 +STEP: Wait for the deployment to be ready 08/24/23 11:46:57.611 +Aug 24 11:46:57.632: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service 08/24/23 11:46:59.649 +STEP: Verifying the service has paired with the endpoint 08/24/23 11:46:59.671 +Aug 24 11:47:00.672: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate custom resource with different stored version [Conformance] + test/e2e/apimachinery/webhook.go:323 +Aug 24 11:47:00.678: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Registering the mutating webhook for custom resource e2e-test-webhook-8009-crds.webhook.example.com via the AdmissionRegistration API 08/24/23 11:47:01.196 +STEP: Creating a custom resource while v1 is storage version 08/24/23 11:47:01.232 +STEP: Patching Custom Resource Definition to set v2 as storage 08/24/23 11:47:03.522 +STEP: Patching the custom resource while v2 is storage version 08/24/23 11:47:03.563 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 15:50:38.573: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] DNS +Aug 24 11:47:04.351: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "dns-9936" for this suite. 07/29/23 15:50:38.583 +STEP: Destroying namespace "webhook-2010" for this suite. 08/24/23 11:47:04.459 +STEP: Destroying namespace "webhook-2010-markers" for this suite. 
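
Annotation (not part of the captured log): the webhook spec above relies on the CRD versioning contract — exactly one version carries storage=true, and patching that flag from v1 to v2 changes how new writes are persisted while the mutating webhook keeps firing on each write. A sketch that merely inspects those flags, with the CRD name taken from the log and client construction as in the earlier sketches:

    package example

    import (
        "context"
        "fmt"

        apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // printVersions lists the served/storage flags for each version of the CRD
    // that the spec above patches.
    func printVersions(client apiextclient.Interface) error {
        crd, err := client.ApiextensionsV1().CustomResourceDefinitions().Get(
            context.TODO(), "e2e-test-webhook-8009-crds.webhook.example.com", metav1.GetOptions{})
        if err != nil {
            return err
        }
        for _, v := range crd.Spec.Versions {
            fmt.Printf("version=%s served=%t storage=%t\n", v.Name, v.Served, v.Storage)
        }
        return nil
    }
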
08/24/23 11:47:04.479 ------------------------------ -• [4.237 seconds] -[sig-network] DNS -test/e2e/network/common/framework.go:23 - should provide DNS for pods for Hostname [Conformance] - test/e2e/network/dns.go:248 +• [SLOW TEST] [8.035 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should mutate custom resource with different stored version [Conformance] + test/e2e/apimachinery/webhook.go:323 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] DNS + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:50:34.369 - Jul 29 15:50:34.369: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename dns 07/29/23 15:50:34.372 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:34.407 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:34.414 - [BeforeEach] [sig-network] DNS + STEP: Creating a kubernetes client 08/24/23 11:46:56.464 + Aug 24 11:46:56.464: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 11:46:56.467 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:46:56.49 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:46:56.493 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should provide DNS for pods for Hostname [Conformance] - test/e2e/network/dns.go:248 - STEP: Creating a test headless service 07/29/23 15:50:34.42 - STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9936.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-2.dns-test-service-2.dns-9936.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/wheezy_hosts@dns-querier-2;sleep 1; done - 07/29/23 15:50:34.43 - STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9936.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-2.dns-test-service-2.dns-9936.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/jessie_hosts@dns-querier-2;sleep 1; done - 07/29/23 15:50:34.431 - STEP: creating a pod to probe DNS 07/29/23 15:50:34.433 - STEP: submitting the pod to kubernetes 07/29/23 15:50:34.433 - Jul 29 15:50:34.452: INFO: Waiting up to 15m0s for pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078" in namespace "dns-9936" to be "running" - Jul 29 15:50:34.461: INFO: Pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078": Phase="Pending", Reason="", readiness=false. Elapsed: 9.48012ms - Jul 29 15:50:36.470: INFO: Pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018700768s - Jul 29 15:50:38.470: INFO: Pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.017975169s - Jul 29 15:50:38.470: INFO: Pod "dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078" satisfied condition "running" - STEP: retrieving the pod 07/29/23 15:50:38.47 - STEP: looking for the results for each expected name from probers 07/29/23 15:50:38.477 - Jul 29 15:50:38.505: INFO: DNS probes using dns-9936/dns-test-62e65ffa-8c5f-4fea-bf57-5a75ac89c078 succeeded - - STEP: deleting the pod 07/29/23 15:50:38.506 - STEP: deleting the test headless service 07/29/23 15:50:38.533 - [AfterEach] [sig-network] DNS - test/e2e/framework/node/init/init.go:32 - Jul 29 15:50:38.573: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] DNS + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 11:46:56.522 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 11:46:57.564 + STEP: Deploying the webhook pod 08/24/23 11:46:57.588 + STEP: Wait for the deployment to be ready 08/24/23 11:46:57.611 + Aug 24 11:46:57.632: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set + STEP: Deploying the webhook service 08/24/23 11:46:59.649 + STEP: Verifying the service has paired with the endpoint 08/24/23 11:46:59.671 + Aug 24 11:47:00.672: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should mutate custom resource with different stored version [Conformance] + test/e2e/apimachinery/webhook.go:323 + Aug 24 11:47:00.678: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Registering the mutating webhook for custom resource e2e-test-webhook-8009-crds.webhook.example.com via the AdmissionRegistration API 08/24/23 11:47:01.196 + STEP: Creating a custom resource while v1 is storage version 08/24/23 11:47:01.232 + STEP: Patching Custom Resource Definition to set v2 as storage 08/24/23 11:47:03.522 + STEP: Patching the custom resource while v2 is storage version 08/24/23 11:47:03.563 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/node/init/init.go:32 + Aug 24 11:47:04.351: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] DNS + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "dns-9936" for this suite. 07/29/23 15:50:38.583 + STEP: Destroying namespace "webhook-2010" for this suite. 08/24/23 11:47:04.459 + STEP: Destroying namespace "webhook-2010-markers" for this suite. 
08/24/23 11:47:04.479 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SS ------------------------------ -[sig-storage] Projected downwardAPI - should provide container's cpu request [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:221 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + should be able to convert from CR v1 to CR v2 [Conformance] + test/e2e/apimachinery/crd_conversion_webhook.go:149 +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:50:38.624 -Jul 29 15:50:38.624: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 15:50:38.626 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:38.657 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:38.663 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 11:47:04.501 +Aug 24 11:47:04.501: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-webhook 08/24/23 11:47:04.505 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:47:04.555 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:47:04.561 +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should provide container's cpu request [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:221 -STEP: Creating a pod to test downward API volume plugin 07/29/23 15:50:38.668 -Jul 29 15:50:38.684: INFO: Waiting up to 5m0s for pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9" in namespace "projected-9741" to be "Succeeded or Failed" -Jul 29 15:50:38.689: INFO: Pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9": Phase="Pending", Reason="", readiness=false. Elapsed: 5.143718ms -Jul 29 15:50:40.697: INFO: Pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013191667s -Jul 29 15:50:42.696: INFO: Pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01251694s -STEP: Saw pod success 07/29/23 15:50:42.696 -Jul 29 15:50:42.697: INFO: Pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9" satisfied condition "Succeeded or Failed" -Jul 29 15:50:42.704: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9 container client-container: -STEP: delete the pod 07/29/23 15:50:42.736 -Jul 29 15:50:42.765: INFO: Waiting for pod downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9 to disappear -Jul 29 15:50:42.775: INFO: Pod downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/crd_conversion_webhook.go:128 +STEP: Setting up server cert 08/24/23 11:47:04.566 +STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication 08/24/23 11:47:05.714 +STEP: Deploying the custom resource conversion webhook pod 08/24/23 11:47:05.723 +STEP: Wait for the deployment to be ready 08/24/23 11:47:05.741 +Aug 24 11:47:05.751: INFO: new replicaset for deployment "sample-crd-conversion-webhook-deployment" is yet to be created +STEP: Deploying the webhook service 08/24/23 11:47:07.769 +STEP: Verifying the service has paired with the endpoint 08/24/23 11:47:07.786 +Aug 24 11:47:08.787: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 +[It] should be able to convert from CR v1 to CR v2 [Conformance] + test/e2e/apimachinery/crd_conversion_webhook.go:149 +Aug 24 11:47:08.798: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Creating a v1 custom resource 08/24/23 11:47:11.645 +STEP: v2 custom resource should be converted 08/24/23 11:47:11.653 +[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 15:50:42.775: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 11:47:12.182: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/crd_conversion_webhook.go:139 +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "projected-9741" for this suite. 07/29/23 15:50:42.786 +STEP: Destroying namespace "crd-webhook-1440" for this suite. 
08/24/23 11:47:12.28 ------------------------------ -• [4.175 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should provide container's cpu request [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:221 +• [SLOW TEST] [7.807 seconds] +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should be able to convert from CR v1 to CR v2 [Conformance] + test/e2e/apimachinery/crd_conversion_webhook.go:149 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:50:38.624 - Jul 29 15:50:38.624: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 15:50:38.626 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:38.657 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:38.663 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 11:47:04.501 + Aug 24 11:47:04.501: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-webhook 08/24/23 11:47:04.505 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:47:04.555 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:47:04.561 + [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should provide container's cpu request [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:221 - STEP: Creating a pod to test downward API volume plugin 07/29/23 15:50:38.668 - Jul 29 15:50:38.684: INFO: Waiting up to 5m0s for pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9" in namespace "projected-9741" to be "Succeeded or Failed" - Jul 29 15:50:38.689: INFO: Pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9": Phase="Pending", Reason="", readiness=false. Elapsed: 5.143718ms - Jul 29 15:50:40.697: INFO: Pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013191667s - Jul 29 15:50:42.696: INFO: Pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01251694s - STEP: Saw pod success 07/29/23 15:50:42.696 - Jul 29 15:50:42.697: INFO: Pod "downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9" satisfied condition "Succeeded or Failed" - Jul 29 15:50:42.704: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9 container client-container: - STEP: delete the pod 07/29/23 15:50:42.736 - Jul 29 15:50:42.765: INFO: Waiting for pod downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9 to disappear - Jul 29 15:50:42.775: INFO: Pod downwardapi-volume-0ca93a9d-abf6-4da4-b92c-e0140732b9e9 no longer exists - [AfterEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/crd_conversion_webhook.go:128 + STEP: Setting up server cert 08/24/23 11:47:04.566 + STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication 08/24/23 11:47:05.714 + STEP: Deploying the custom resource conversion webhook pod 08/24/23 11:47:05.723 + STEP: Wait for the deployment to be ready 08/24/23 11:47:05.741 + Aug 24 11:47:05.751: INFO: new replicaset for deployment "sample-crd-conversion-webhook-deployment" is yet to be created + STEP: Deploying the webhook service 08/24/23 11:47:07.769 + STEP: Verifying the service has paired with the endpoint 08/24/23 11:47:07.786 + Aug 24 11:47:08.787: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 + [It] should be able to convert from CR v1 to CR v2 [Conformance] + test/e2e/apimachinery/crd_conversion_webhook.go:149 + Aug 24 11:47:08.798: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Creating a v1 custom resource 08/24/23 11:47:11.645 + STEP: v2 custom resource should be converted 08/24/23 11:47:11.653 + [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 15:50:42.775: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 11:47:12.182: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/crd_conversion_webhook.go:139 + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "projected-9741" for this suite. 07/29/23 15:50:42.786 + STEP: Destroying namespace "crd-webhook-1440" for this suite. 
08/24/23 11:47:12.28 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Proxy server - should support proxy with --port 0 [Conformance] - test/e2e/kubectl/kubectl.go:1787 -[BeforeEach] [sig-cli] Kubectl client +[sig-network] Services + should have session affinity work for NodePort service [LinuxOnly] [Conformance] + test/e2e/network/service.go:2228 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:50:42.8 -Jul 29 15:50:42.800: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 15:50:42.803 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:42.836 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:42.84 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 11:47:12.325 +Aug 24 11:47:12.326: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 11:47:12.334 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:47:12.37 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:47:12.375 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[It] should support proxy with --port 0 [Conformance] - test/e2e/kubectl/kubectl.go:1787 -STEP: starting the proxy server 07/29/23 15:50:42.847 -Jul 29 15:50:42.848: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-4355 proxy -p 0 --disable-filter' -STEP: curling proxy /api/ output 07/29/23 15:50:42.951 -[AfterEach] [sig-cli] Kubectl client +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should have session affinity work for NodePort service [LinuxOnly] [Conformance] + test/e2e/network/service.go:2228 +STEP: creating service in namespace services-4230 08/24/23 11:47:12.381 +STEP: creating service affinity-nodeport in namespace services-4230 08/24/23 11:47:12.381 +STEP: creating replication controller affinity-nodeport in namespace services-4230 08/24/23 11:47:12.407 +I0824 11:47:12.421424 14 runners.go:193] Created replication controller with name: affinity-nodeport, namespace: services-4230, replica count: 3 +I0824 11:47:15.473628 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0824 11:47:18.474330 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0824 11:47:21.475756 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0824 11:47:24.477081 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0824 11:47:27.477665 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Aug 24 11:47:27.497: INFO: Creating new exec pod +Aug 24 11:47:27.517: INFO: Waiting up to 5m0s for pod 
"execpod-affinitywvbq2" in namespace "services-4230" to be "running" +Aug 24 11:47:27.530: INFO: Pod "execpod-affinitywvbq2": Phase="Pending", Reason="", readiness=false. Elapsed: 13.292084ms +Aug 24 11:47:29.546: INFO: Pod "execpod-affinitywvbq2": Phase="Running", Reason="", readiness=true. Elapsed: 2.028964449s +Aug 24 11:47:29.546: INFO: Pod "execpod-affinitywvbq2" satisfied condition "running" +Aug 24 11:47:30.558: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c nc -v -z -w 2 affinity-nodeport 80' +Aug 24 11:47:30.875: INFO: stderr: "+ nc -v -z -w 2 affinity-nodeport 80\nConnection to affinity-nodeport 80 port [tcp/http] succeeded!\n" +Aug 24 11:47:30.875: INFO: stdout: "" +Aug 24 11:47:30.875: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c nc -v -z -w 2 10.233.19.15 80' +Aug 24 11:47:31.164: INFO: stderr: "+ nc -v -z -w 2 10.233.19.15 80\nConnection to 10.233.19.15 80 port [tcp/http] succeeded!\n" +Aug 24 11:47:31.164: INFO: stdout: "" +Aug 24 11:47:31.164: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c nc -v -z -w 2 192.168.121.111 31524' +Aug 24 11:47:31.429: INFO: stderr: "+ nc -v -z -w 2 192.168.121.111 31524\nConnection to 192.168.121.111 31524 port [tcp/*] succeeded!\n" +Aug 24 11:47:31.429: INFO: stdout: "" +Aug 24 11:47:31.430: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c nc -v -z -w 2 192.168.121.127 31524' +Aug 24 11:47:31.689: INFO: stderr: "+ nc -v -z -w 2 192.168.121.127 31524\nConnection to 192.168.121.127 31524 port [tcp/*] succeeded!\n" +Aug 24 11:47:31.689: INFO: stdout: "" +Aug 24 11:47:31.689: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.127:31524/ ; done' +Aug 24 11:47:32.211: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n" +Aug 24 11:47:32.211: INFO: stdout: 
"\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp" +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp +Aug 24 11:47:32.211: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-nodeport in namespace services-4230, will wait for the garbage collector to delete the pods 08/24/23 11:47:32.237 +Aug 24 11:47:32.313: INFO: Deleting ReplicationController affinity-nodeport took: 12.726578ms +Aug 24 11:47:32.414: INFO: Terminating ReplicationController affinity-nodeport pods took: 100.890611ms +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 15:50:42.967: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 11:47:34.476: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-4355" for this suite. 07/29/23 15:50:42.977 +STEP: Destroying namespace "services-4230" for this suite. 
08/24/23 11:47:34.488 ------------------------------ -• [0.199 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Proxy server - test/e2e/kubectl/kubectl.go:1780 - should support proxy with --port 0 [Conformance] - test/e2e/kubectl/kubectl.go:1787 +• [SLOW TEST] [22.181 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should have session affinity work for NodePort service [LinuxOnly] [Conformance] + test/e2e/network/service.go:2228 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:50:42.8 - Jul 29 15:50:42.800: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 15:50:42.803 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:42.836 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:42.84 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 11:47:12.325 + Aug 24 11:47:12.326: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 11:47:12.334 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:47:12.37 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:47:12.375 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [It] should support proxy with --port 0 [Conformance] - test/e2e/kubectl/kubectl.go:1787 - STEP: starting the proxy server 07/29/23 15:50:42.847 - Jul 29 15:50:42.848: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-4355 proxy -p 0 --disable-filter' - STEP: curling proxy /api/ output 07/29/23 15:50:42.951 - [AfterEach] [sig-cli] Kubectl client + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should have session affinity work for NodePort service [LinuxOnly] [Conformance] + test/e2e/network/service.go:2228 + STEP: creating service in namespace services-4230 08/24/23 11:47:12.381 + STEP: creating service affinity-nodeport in namespace services-4230 08/24/23 11:47:12.381 + STEP: creating replication controller affinity-nodeport in namespace services-4230 08/24/23 11:47:12.407 + I0824 11:47:12.421424 14 runners.go:193] Created replication controller with name: affinity-nodeport, namespace: services-4230, replica count: 3 + I0824 11:47:15.473628 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + I0824 11:47:18.474330 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + I0824 11:47:21.475756 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + I0824 11:47:24.477081 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 2 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + I0824 11:47:27.477665 14 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 
runningButNotReady + Aug 24 11:47:27.497: INFO: Creating new exec pod + Aug 24 11:47:27.517: INFO: Waiting up to 5m0s for pod "execpod-affinitywvbq2" in namespace "services-4230" to be "running" + Aug 24 11:47:27.530: INFO: Pod "execpod-affinitywvbq2": Phase="Pending", Reason="", readiness=false. Elapsed: 13.292084ms + Aug 24 11:47:29.546: INFO: Pod "execpod-affinitywvbq2": Phase="Running", Reason="", readiness=true. Elapsed: 2.028964449s + Aug 24 11:47:29.546: INFO: Pod "execpod-affinitywvbq2" satisfied condition "running" + Aug 24 11:47:30.558: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c nc -v -z -w 2 affinity-nodeport 80' + Aug 24 11:47:30.875: INFO: stderr: "+ nc -v -z -w 2 affinity-nodeport 80\nConnection to affinity-nodeport 80 port [tcp/http] succeeded!\n" + Aug 24 11:47:30.875: INFO: stdout: "" + Aug 24 11:47:30.875: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c nc -v -z -w 2 10.233.19.15 80' + Aug 24 11:47:31.164: INFO: stderr: "+ nc -v -z -w 2 10.233.19.15 80\nConnection to 10.233.19.15 80 port [tcp/http] succeeded!\n" + Aug 24 11:47:31.164: INFO: stdout: "" + Aug 24 11:47:31.164: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c nc -v -z -w 2 192.168.121.111 31524' + Aug 24 11:47:31.429: INFO: stderr: "+ nc -v -z -w 2 192.168.121.111 31524\nConnection to 192.168.121.111 31524 port [tcp/*] succeeded!\n" + Aug 24 11:47:31.429: INFO: stdout: "" + Aug 24 11:47:31.430: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c nc -v -z -w 2 192.168.121.127 31524' + Aug 24 11:47:31.689: INFO: stderr: "+ nc -v -z -w 2 192.168.121.127 31524\nConnection to 192.168.121.127 31524 port [tcp/*] succeeded!\n" + Aug 24 11:47:31.689: INFO: stdout: "" + Aug 24 11:47:31.689: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-4230 exec execpod-affinitywvbq2 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.127:31524/ ; done' + Aug 24 11:47:32.211: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:31524/\n" + Aug 24 
11:47:32.211: INFO: stdout: "\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp\naffinity-nodeport-hrvkp" + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Received response from host: affinity-nodeport-hrvkp + Aug 24 11:47:32.211: INFO: Cleaning up the exec pod + STEP: deleting ReplicationController affinity-nodeport in namespace services-4230, will wait for the garbage collector to delete the pods 08/24/23 11:47:32.237 + Aug 24 11:47:32.313: INFO: Deleting ReplicationController affinity-nodeport took: 12.726578ms + Aug 24 11:47:32.414: INFO: Terminating ReplicationController affinity-nodeport pods took: 100.890611ms + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 15:50:42.967: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 11:47:34.476: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-4355" for this suite. 07/29/23 15:50:42.977 + STEP: Destroying namespace "services-4230" for this suite. 
08/24/23 11:47:34.488 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSSSSSSSSSSSSS ------------------------------ -[sig-apps] Deployment - deployment should delete old replica sets [Conformance] - test/e2e/apps/deployment.go:122 -[BeforeEach] [sig-apps] Deployment +[sig-network] Networking Granular Checks: Pods + should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:122 +[BeforeEach] [sig-network] Networking set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:50:43 -Jul 29 15:50:43.000: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename deployment 07/29/23 15:50:43.004 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:43.033 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:43.038 -[BeforeEach] [sig-apps] Deployment +STEP: Creating a kubernetes client 08/24/23 11:47:34.514 +Aug 24 11:47:34.514: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pod-network-test 08/24/23 11:47:34.516 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:47:34.549 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:47:34.56 +[BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 -[It] deployment should delete old replica sets [Conformance] - test/e2e/apps/deployment.go:122 -Jul 29 15:50:43.064: INFO: Pod name cleanup-pod: Found 0 pods out of 1 -Jul 29 15:50:48.070: INFO: Pod name cleanup-pod: Found 1 pods out of 1 -STEP: ensuring each pod is running 07/29/23 15:50:48.07 -Jul 29 15:50:48.071: INFO: Creating deployment test-cleanup-deployment -STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up 07/29/23 15:50:48.093 -[AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 -Jul 29 15:50:48.115: INFO: Deployment "test-cleanup-deployment": -&Deployment{ObjectMeta:{test-cleanup-deployment deployment-2801 a6d57545-22f4-4594-b7a4-ed473f313945 7340 1 2023-07-29 15:50:48 +0000 UTC map[name:cleanup-pod] map[] [] [] [{e2e.test Update apps/v1 2023-07-29 15:50:48 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent 
SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00543a468 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:0,Replicas:0,UpdatedReplicas:0,AvailableReplicas:0,UnavailableReplicas:0,Conditions:[]DeploymentCondition{},ReadyReplicas:0,CollisionCount:nil,},} - -Jul 29 15:50:48.140: INFO: New ReplicaSet of Deployment "test-cleanup-deployment" is nil. -Jul 29 15:50:48.141: INFO: All old ReplicaSets of Deployment "test-cleanup-deployment": -Jul 29 15:50:48.141: INFO: &ReplicaSet{ObjectMeta:{test-cleanup-controller deployment-2801 19152dd1-ee70-44e7-8c5b-c80822cefb3c 7341 1 2023-07-29 15:50:43 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [{apps/v1 Deployment test-cleanup-deployment a6d57545-22f4-4594-b7a4-ed473f313945 0xc00543a78f 0xc00543a7a0}] [] [{e2e.test Update apps/v1 2023-07-29 15:50:43 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 15:50:44 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-07-29 15:50:48 +0000 UTC FieldsV1 {"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"a6d57545-22f4-4594-b7a4-ed473f313945\"}":{}}}} }]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc00543a868 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} -Jul 29 15:50:48.158: INFO: Pod "test-cleanup-controller-p6frw" is available: -&Pod{ObjectMeta:{test-cleanup-controller-p6frw test-cleanup-controller- deployment-2801 
b6a4ef7f-bed8-4b50-ad9c-4d028346b756 7327 0 2023-07-29 15:50:43 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [{apps/v1 ReplicaSet test-cleanup-controller 19152dd1-ee70-44e7-8c5b-c80822cefb3c 0xc0054ccd7f 0xc0054ccd90}] [] [{kube-controller-manager Update v1 2023-07-29 15:50:43 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"19152dd1-ee70-44e7-8c5b-c80822cefb3c\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 15:50:44 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.73\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-wd58p,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wd58p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:defau
lt,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:50:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:50:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:50:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:50:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.73,StartTime:2023-07-29 15:50:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 15:50:44 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://17c4d05232eb09bae8cabf0489199b4306efd3dc6f94d31992bf2c92952cd7e9,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.73,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +[It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:122 +STEP: Performing setup for networking test in namespace pod-network-test-9005 08/24/23 11:47:34.565 +STEP: creating a selector 08/24/23 11:47:34.566 +STEP: Creating the service pods in kubernetes 08/24/23 11:47:34.567 +Aug 24 11:47:34.568: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Aug 24 11:47:34.620: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-9005" to be "running and ready" +Aug 24 11:47:34.627: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. 
Elapsed: 6.851551ms +Aug 24 11:47:34.628: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:47:36.638: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.017744014s +Aug 24 11:47:36.638: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:38.636: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.015512625s +Aug 24 11:47:38.636: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:40.637: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.017006785s +Aug 24 11:47:40.638: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:42.637: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.016525048s +Aug 24 11:47:42.637: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:44.639: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.018047301s +Aug 24 11:47:44.639: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:46.636: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.015275775s +Aug 24 11:47:46.636: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:48.637: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.016273993s +Aug 24 11:47:48.637: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:50.637: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.016177954s +Aug 24 11:47:50.637: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:52.638: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.017936834s +Aug 24 11:47:52.639: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:54.639: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.018778916s +Aug 24 11:47:54.639: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:47:56.639: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 22.018579242s +Aug 24 11:47:56.639: INFO: The phase of Pod netserver-0 is Running (Ready = true) +Aug 24 11:47:56.639: INFO: Pod "netserver-0" satisfied condition "running and ready" +Aug 24 11:47:56.647: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-9005" to be "running and ready" +Aug 24 11:47:56.654: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 6.893002ms +Aug 24 11:47:56.654: INFO: The phase of Pod netserver-1 is Running (Ready = true) +Aug 24 11:47:56.654: INFO: Pod "netserver-1" satisfied condition "running and ready" +Aug 24 11:47:56.660: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-9005" to be "running and ready" +Aug 24 11:47:56.666: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 5.280723ms +Aug 24 11:47:56.666: INFO: The phase of Pod netserver-2 is Running (Ready = true) +Aug 24 11:47:56.666: INFO: Pod "netserver-2" satisfied condition "running and ready" +STEP: Creating test pods 08/24/23 11:47:56.671 +Aug 24 11:47:56.702: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-9005" to be "running" +Aug 24 11:47:56.721: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 18.289454ms +Aug 24 11:47:58.728: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.025393799s +Aug 24 11:47:58.728: INFO: Pod "test-container-pod" satisfied condition "running" +Aug 24 11:47:58.733: INFO: Waiting up to 5m0s for pod "host-test-container-pod" in namespace "pod-network-test-9005" to be "running" +Aug 24 11:47:58.737: INFO: Pod "host-test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 3.751608ms +Aug 24 11:47:58.737: INFO: Pod "host-test-container-pod" satisfied condition "running" +Aug 24 11:47:58.741: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 +Aug 24 11:47:58.742: INFO: Going to poll 10.233.64.34 on port 8081 at least 0 times, with a maximum of 39 tries before failing +Aug 24 11:47:58.748: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.64.34 8081 | grep -v '^\s*$'] Namespace:pod-network-test-9005 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:47:58.748: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:47:58.749: INFO: ExecWithOptions: Clientset creation +Aug 24 11:47:58.749: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9005/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.64.34+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) +Aug 24 11:47:59.893: INFO: Found all 1 expected endpoints: [netserver-0] +Aug 24 11:47:59.893: INFO: Going to poll 10.233.65.44 on port 8081 at least 0 times, with a maximum of 39 tries before failing +Aug 24 11:47:59.902: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.65.44 8081 | grep -v '^\s*$'] Namespace:pod-network-test-9005 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:47:59.902: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:47:59.905: INFO: ExecWithOptions: Clientset creation +Aug 24 11:47:59.905: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9005/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.65.44+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) +Aug 24 11:48:01.046: INFO: Found all 1 expected endpoints: [netserver-1] +Aug 24 11:48:01.046: INFO: Going to poll 10.233.66.108 on port 8081 at least 0 times, with a maximum of 39 tries before failing +Aug 24 11:48:01.053: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.66.108 8081 | grep -v '^\s*$'] Namespace:pod-network-test-9005 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:48:01.053: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:48:01.056: INFO: ExecWithOptions: Clientset creation +Aug 24 11:48:01.056: INFO: ExecWithOptions: execute(POST 
https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9005/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.66.108+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) +Aug 24 11:48:02.240: INFO: Found all 1 expected endpoints: [netserver-2] +[AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 -Jul 29 15:50:48.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Deployment +Aug 24 11:48:02.240: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Networking test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 -STEP: Destroying namespace "deployment-2801" for this suite. 07/29/23 15:50:48.198 +STEP: Destroying namespace "pod-network-test-9005" for this suite. 08/24/23 11:48:02.249 ------------------------------ -• [SLOW TEST] [5.242 seconds] -[sig-apps] Deployment -test/e2e/apps/framework.go:23 - deployment should delete old replica sets [Conformance] - test/e2e/apps/deployment.go:122 +• [SLOW TEST] [27.748 seconds] +[sig-network] Networking +test/e2e/common/network/framework.go:23 + Granular Checks: Pods + test/e2e/common/network/networking.go:32 + should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:122 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Deployment + [BeforeEach] [sig-network] Networking set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:50:43 - Jul 29 15:50:43.000: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename deployment 07/29/23 15:50:43.004 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:43.033 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:43.038 - [BeforeEach] [sig-apps] Deployment + STEP: Creating a kubernetes client 08/24/23 11:47:34.514 + Aug 24 11:47:34.514: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pod-network-test 08/24/23 11:47:34.516 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:47:34.549 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:47:34.56 + [BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 - [It] deployment should delete old replica sets [Conformance] - test/e2e/apps/deployment.go:122 - Jul 29 15:50:43.064: INFO: Pod name cleanup-pod: Found 0 pods out of 1 - Jul 29 15:50:48.070: INFO: Pod name cleanup-pod: Found 1 pods out of 1 - STEP: ensuring each pod is running 07/29/23 15:50:48.07 - Jul 29 15:50:48.071: INFO: Creating deployment test-cleanup-deployment - STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up 07/29/23 15:50:48.093 - [AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 - Jul 29 15:50:48.115: INFO: Deployment "test-cleanup-deployment": - &Deployment{ObjectMeta:{test-cleanup-deployment deployment-2801 
a6d57545-22f4-4594-b7a4-ed473f313945 7340 1 2023-07-29 15:50:48 +0000 UTC map[name:cleanup-pod] map[] [] [] [{e2e.test Update apps/v1 2023-07-29 15:50:48 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00543a468 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:0,Replicas:0,UpdatedReplicas:0,AvailableReplicas:0,UnavailableReplicas:0,Conditions:[]DeploymentCondition{},ReadyReplicas:0,CollisionCount:nil,},} - - Jul 29 15:50:48.140: INFO: New ReplicaSet of Deployment "test-cleanup-deployment" is nil. 
- Jul 29 15:50:48.141: INFO: All old ReplicaSets of Deployment "test-cleanup-deployment": - Jul 29 15:50:48.141: INFO: &ReplicaSet{ObjectMeta:{test-cleanup-controller deployment-2801 19152dd1-ee70-44e7-8c5b-c80822cefb3c 7341 1 2023-07-29 15:50:43 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [{apps/v1 Deployment test-cleanup-deployment a6d57545-22f4-4594-b7a4-ed473f313945 0xc00543a78f 0xc00543a7a0}] [] [{e2e.test Update apps/v1 2023-07-29 15:50:43 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 15:50:44 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-07-29 15:50:48 +0000 UTC FieldsV1 {"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"a6d57545-22f4-4594-b7a4-ed473f313945\"}":{}}}} }]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc00543a868 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} - Jul 29 15:50:48.158: INFO: Pod "test-cleanup-controller-p6frw" is available: - &Pod{ObjectMeta:{test-cleanup-controller-p6frw test-cleanup-controller- deployment-2801 b6a4ef7f-bed8-4b50-ad9c-4d028346b756 7327 0 2023-07-29 15:50:43 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [{apps/v1 ReplicaSet test-cleanup-controller 19152dd1-ee70-44e7-8c5b-c80822cefb3c 0xc0054ccd7f 0xc0054ccd90}] [] [{kube-controller-manager Update v1 2023-07-29 15:50:43 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"19152dd1-ee70-44e7-8c5b-c80822cefb3c\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 15:50:44 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.73\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-wd58p,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wd58p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodRea
dinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:50:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:50:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:50:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:50:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.73,StartTime:2023-07-29 15:50:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 15:50:44 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://17c4d05232eb09bae8cabf0489199b4306efd3dc6f94d31992bf2c92952cd7e9,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.73,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - [AfterEach] [sig-apps] Deployment + [It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:122 + STEP: Performing setup for networking test in namespace pod-network-test-9005 08/24/23 11:47:34.565 + STEP: creating a selector 08/24/23 11:47:34.566 + STEP: Creating the service pods in kubernetes 08/24/23 11:47:34.567 + Aug 24 11:47:34.568: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable + Aug 24 11:47:34.620: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-9005" to be "running and ready" + Aug 24 11:47:34.627: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 6.851551ms + Aug 24 11:47:34.628: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:47:36.638: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.017744014s + Aug 24 11:47:36.638: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:38.636: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.015512625s + Aug 24 11:47:38.636: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:40.637: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.017006785s + Aug 24 11:47:40.638: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:42.637: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 8.016525048s + Aug 24 11:47:42.637: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:44.639: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.018047301s + Aug 24 11:47:44.639: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:46.636: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.015275775s + Aug 24 11:47:46.636: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:48.637: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.016273993s + Aug 24 11:47:48.637: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:50.637: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.016177954s + Aug 24 11:47:50.637: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:52.638: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.017936834s + Aug 24 11:47:52.639: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:54.639: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.018778916s + Aug 24 11:47:54.639: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:47:56.639: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 22.018579242s + Aug 24 11:47:56.639: INFO: The phase of Pod netserver-0 is Running (Ready = true) + Aug 24 11:47:56.639: INFO: Pod "netserver-0" satisfied condition "running and ready" + Aug 24 11:47:56.647: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-9005" to be "running and ready" + Aug 24 11:47:56.654: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 6.893002ms + Aug 24 11:47:56.654: INFO: The phase of Pod netserver-1 is Running (Ready = true) + Aug 24 11:47:56.654: INFO: Pod "netserver-1" satisfied condition "running and ready" + Aug 24 11:47:56.660: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-9005" to be "running and ready" + Aug 24 11:47:56.666: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 5.280723ms + Aug 24 11:47:56.666: INFO: The phase of Pod netserver-2 is Running (Ready = true) + Aug 24 11:47:56.666: INFO: Pod "netserver-2" satisfied condition "running and ready" + STEP: Creating test pods 08/24/23 11:47:56.671 + Aug 24 11:47:56.702: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-9005" to be "running" + Aug 24 11:47:56.721: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 18.289454ms + Aug 24 11:47:58.728: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.025393799s + Aug 24 11:47:58.728: INFO: Pod "test-container-pod" satisfied condition "running" + Aug 24 11:47:58.733: INFO: Waiting up to 5m0s for pod "host-test-container-pod" in namespace "pod-network-test-9005" to be "running" + Aug 24 11:47:58.737: INFO: Pod "host-test-container-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 3.751608ms + Aug 24 11:47:58.737: INFO: Pod "host-test-container-pod" satisfied condition "running" + Aug 24 11:47:58.741: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 + Aug 24 11:47:58.742: INFO: Going to poll 10.233.64.34 on port 8081 at least 0 times, with a maximum of 39 tries before failing + Aug 24 11:47:58.748: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.64.34 8081 | grep -v '^\s*$'] Namespace:pod-network-test-9005 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:47:58.748: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:47:58.749: INFO: ExecWithOptions: Clientset creation + Aug 24 11:47:58.749: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9005/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.64.34+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) + Aug 24 11:47:59.893: INFO: Found all 1 expected endpoints: [netserver-0] + Aug 24 11:47:59.893: INFO: Going to poll 10.233.65.44 on port 8081 at least 0 times, with a maximum of 39 tries before failing + Aug 24 11:47:59.902: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.65.44 8081 | grep -v '^\s*$'] Namespace:pod-network-test-9005 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:47:59.902: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:47:59.905: INFO: ExecWithOptions: Clientset creation + Aug 24 11:47:59.905: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9005/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.65.44+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) + Aug 24 11:48:01.046: INFO: Found all 1 expected endpoints: [netserver-1] + Aug 24 11:48:01.046: INFO: Going to poll 10.233.66.108 on port 8081 at least 0 times, with a maximum of 39 tries before failing + Aug 24 11:48:01.053: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.66.108 8081 | grep -v '^\s*$'] Namespace:pod-network-test-9005 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:48:01.053: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:48:01.056: INFO: ExecWithOptions: Clientset creation + Aug 24 11:48:01.056: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9005/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.66.108+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) + Aug 24 11:48:02.240: INFO: Found all 1 expected endpoints: [netserver-2] + [AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 - Jul 29 15:50:48.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Deployment + Aug 24 11:48:02.240: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] 
[sig-network] Networking test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 - STEP: Destroying namespace "deployment-2801" for this suite. 07/29/23 15:50:48.198 + STEP: Destroying namespace "pod-network-test-9005" for this suite. 08/24/23 11:48:02.249 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:84 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-storage] Subpath Atomic writer volumes + should support subpaths with secret pod [Conformance] + test/e2e/storage/subpath.go:60 +[BeforeEach] [sig-storage] Subpath set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:50:48.248 -Jul 29 15:50:48.248: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 15:50:48.251 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:48.343 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:48.349 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 11:48:02.264 +Aug 24 11:48:02.264: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename subpath 08/24/23 11:48:02.266 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:48:02.297 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:48:02.301 +[BeforeEach] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:84 -STEP: Creating a pod to test downward API volume plugin 07/29/23 15:50:48.362 -Jul 29 15:50:48.391: INFO: Waiting up to 5m0s for pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6" in namespace "projected-9545" to be "Succeeded or Failed" -Jul 29 15:50:48.400: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6": Phase="Pending", Reason="", readiness=false. Elapsed: 9.491315ms -Jul 29 15:50:50.407: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6": Phase="Running", Reason="", readiness=true. Elapsed: 2.016573525s -Jul 29 15:50:52.411: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6": Phase="Running", Reason="", readiness=false. Elapsed: 4.02027417s -Jul 29 15:50:54.409: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.018056565s -STEP: Saw pod success 07/29/23 15:50:54.409 -Jul 29 15:50:54.409: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6" satisfied condition "Succeeded or Failed" -Jul 29 15:50:54.415: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6 container client-container: -STEP: delete the pod 07/29/23 15:50:54.429 -Jul 29 15:50:54.451: INFO: Waiting for pod downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6 to disappear -Jul 29 15:50:54.457: INFO: Pod downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +[BeforeEach] Atomic writer volumes + test/e2e/storage/subpath.go:40 +STEP: Setting up data 08/24/23 11:48:02.306 +[It] should support subpaths with secret pod [Conformance] + test/e2e/storage/subpath.go:60 +STEP: Creating pod pod-subpath-test-secret-cxg5 08/24/23 11:48:02.32 +STEP: Creating a pod to test atomic-volume-subpath 08/24/23 11:48:02.32 +Aug 24 11:48:02.331: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-cxg5" in namespace "subpath-9761" to be "Succeeded or Failed" +Aug 24 11:48:02.337: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Pending", Reason="", readiness=false. Elapsed: 5.316589ms +Aug 24 11:48:04.348: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 2.016715891s +Aug 24 11:48:06.345: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 4.01329935s +Aug 24 11:48:08.346: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 6.014706116s +Aug 24 11:48:10.346: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 8.01387875s +Aug 24 11:48:12.345: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 10.013308441s +Aug 24 11:48:14.352: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 12.020790809s +Aug 24 11:48:16.347: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 14.015538335s +Aug 24 11:48:18.344: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 16.012664788s +Aug 24 11:48:20.345: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 18.01328703s +Aug 24 11:48:22.345: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 20.013497619s +Aug 24 11:48:24.346: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 22.014156048s +Aug 24 11:48:26.347: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=false. Elapsed: 24.014988158s +Aug 24 11:48:28.346: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 26.013927887s +STEP: Saw pod success 08/24/23 11:48:28.346 +Aug 24 11:48:28.346: INFO: Pod "pod-subpath-test-secret-cxg5" satisfied condition "Succeeded or Failed" +Aug 24 11:48:28.352: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-secret-cxg5 container test-container-subpath-secret-cxg5: +STEP: delete the pod 08/24/23 11:48:28.383 +Aug 24 11:48:28.400: INFO: Waiting for pod pod-subpath-test-secret-cxg5 to disappear +Aug 24 11:48:28.404: INFO: Pod pod-subpath-test-secret-cxg5 no longer exists +STEP: Deleting pod pod-subpath-test-secret-cxg5 08/24/23 11:48:28.405 +Aug 24 11:48:28.405: INFO: Deleting pod "pod-subpath-test-secret-cxg5" in namespace "subpath-9761" +[AfterEach] [sig-storage] Subpath test/e2e/framework/node/init/init.go:32 -Jul 29 15:50:54.457: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 11:48:28.412: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-storage] Subpath dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-storage] Subpath tear down framework | framework.go:193 -STEP: Destroying namespace "projected-9545" for this suite. 07/29/23 15:50:54.467 +STEP: Destroying namespace "subpath-9761" for this suite. 08/24/23 11:48:28.423 ------------------------------ -• [SLOW TEST] [6.232 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:84 +• [SLOW TEST] [26.170 seconds] +[sig-storage] Subpath +test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + test/e2e/storage/subpath.go:36 + should support subpaths with secret pod [Conformance] + test/e2e/storage/subpath.go:60 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-storage] Subpath set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:50:48.248 - Jul 29 15:50:48.248: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 15:50:48.251 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:48.343 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:48.349 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 11:48:02.264 + Aug 24 11:48:02.264: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename subpath 08/24/23 11:48:02.266 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:48:02.297 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:48:02.301 + [BeforeEach] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:84 - STEP: Creating a pod to test downward API volume plugin 07/29/23 15:50:48.362 - Jul 29 15:50:48.391: INFO: Waiting up to 5m0s for pod 
"downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6" in namespace "projected-9545" to be "Succeeded or Failed" - Jul 29 15:50:48.400: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6": Phase="Pending", Reason="", readiness=false. Elapsed: 9.491315ms - Jul 29 15:50:50.407: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6": Phase="Running", Reason="", readiness=true. Elapsed: 2.016573525s - Jul 29 15:50:52.411: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6": Phase="Running", Reason="", readiness=false. Elapsed: 4.02027417s - Jul 29 15:50:54.409: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.018056565s - STEP: Saw pod success 07/29/23 15:50:54.409 - Jul 29 15:50:54.409: INFO: Pod "downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6" satisfied condition "Succeeded or Failed" - Jul 29 15:50:54.415: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6 container client-container: - STEP: delete the pod 07/29/23 15:50:54.429 - Jul 29 15:50:54.451: INFO: Waiting for pod downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6 to disappear - Jul 29 15:50:54.457: INFO: Pod downwardapi-volume-a89fde85-fe3e-47a0-a817-40b74cfa79c6 no longer exists - [AfterEach] [sig-storage] Projected downwardAPI + [BeforeEach] Atomic writer volumes + test/e2e/storage/subpath.go:40 + STEP: Setting up data 08/24/23 11:48:02.306 + [It] should support subpaths with secret pod [Conformance] + test/e2e/storage/subpath.go:60 + STEP: Creating pod pod-subpath-test-secret-cxg5 08/24/23 11:48:02.32 + STEP: Creating a pod to test atomic-volume-subpath 08/24/23 11:48:02.32 + Aug 24 11:48:02.331: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-cxg5" in namespace "subpath-9761" to be "Succeeded or Failed" + Aug 24 11:48:02.337: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Pending", Reason="", readiness=false. Elapsed: 5.316589ms + Aug 24 11:48:04.348: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 2.016715891s + Aug 24 11:48:06.345: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 4.01329935s + Aug 24 11:48:08.346: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 6.014706116s + Aug 24 11:48:10.346: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 8.01387875s + Aug 24 11:48:12.345: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 10.013308441s + Aug 24 11:48:14.352: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 12.020790809s + Aug 24 11:48:16.347: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 14.015538335s + Aug 24 11:48:18.344: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 16.012664788s + Aug 24 11:48:20.345: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 18.01328703s + Aug 24 11:48:22.345: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. Elapsed: 20.013497619s + Aug 24 11:48:24.346: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=true. 
Elapsed: 22.014156048s + Aug 24 11:48:26.347: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Running", Reason="", readiness=false. Elapsed: 24.014988158s + Aug 24 11:48:28.346: INFO: Pod "pod-subpath-test-secret-cxg5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 26.013927887s + STEP: Saw pod success 08/24/23 11:48:28.346 + Aug 24 11:48:28.346: INFO: Pod "pod-subpath-test-secret-cxg5" satisfied condition "Succeeded or Failed" + Aug 24 11:48:28.352: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-secret-cxg5 container test-container-subpath-secret-cxg5: + STEP: delete the pod 08/24/23 11:48:28.383 + Aug 24 11:48:28.400: INFO: Waiting for pod pod-subpath-test-secret-cxg5 to disappear + Aug 24 11:48:28.404: INFO: Pod pod-subpath-test-secret-cxg5 no longer exists + STEP: Deleting pod pod-subpath-test-secret-cxg5 08/24/23 11:48:28.405 + Aug 24 11:48:28.405: INFO: Deleting pod "pod-subpath-test-secret-cxg5" in namespace "subpath-9761" + [AfterEach] [sig-storage] Subpath test/e2e/framework/node/init/init.go:32 - Jul 29 15:50:54.457: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 11:48:28.412: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-storage] Subpath dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-storage] Subpath tear down framework | framework.go:193 - STEP: Destroying namespace "projected-9545" for this suite. 07/29/23 15:50:54.467 + STEP: Destroying namespace "subpath-9761" for this suite. 
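(Annotation: the subpath test above keeps its pod Running for ~26 seconds by design — the container repeatedly reads a file projected from a secret volume through a subPath mount while the kubelet's atomic writer swaps the underlying timestamped directory beneath it. A hedged sketch of the pod shape involved, using client-go types; the secret name, key, and container name are placeholders, not the suite's actual values:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // subpathTestPod sketches a pod that mounts one key of a secret
    // volume at a fixed path via SubPath, so the atomic-writer symlink
    // swaps stay invisible to the container.
    func subpathTestPod() *corev1.Pod {
        return &corev1.Pod{
            Spec: corev1.PodSpec{
                RestartPolicy: corev1.RestartPolicyNever,
                Volumes: []corev1.Volume{{
                    Name: "test-volume",
                    VolumeSource: corev1.VolumeSource{
                        // Hypothetical secret name for illustration.
                        Secret: &corev1.SecretVolumeSource{SecretName: "my-secret"},
                    },
                }},
                Containers: []corev1.Container{{
                    Name:  "test-container-subpath",
                    Image: "registry.k8s.io/e2e-test-images/agnhost:2.43",
                    VolumeMounts: []corev1.VolumeMount{{
                        Name:      "test-volume",
                        MountPath: "/test-volume",
                        SubPath:   "secret-key", // hypothetical key within the volume
                    }},
                }},
            },
        }
    }

    func main() {
        fmt.Println(subpathTestPod().Spec.Containers[0].VolumeMounts[0].SubPath)
    }

End annotation.)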
08/24/23 11:48:28.423 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl describe - should check if kubectl describe prints relevant information for rc and pods [Conformance] - test/e2e/kubectl/kubectl.go:1276 -[BeforeEach] [sig-cli] Kubectl client +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: http [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:82 +[BeforeEach] [sig-network] Networking set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:50:54.48 -Jul 29 15:50:54.480: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 15:50:54.482 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:54.511 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:54.518 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 11:48:28.436 +Aug 24 11:48:28.436: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pod-network-test 08/24/23 11:48:28.441 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:48:28.473 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:48:28.479 +[BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[It] should check if kubectl describe prints relevant information for rc and pods [Conformance] - test/e2e/kubectl/kubectl.go:1276 -Jul 29 15:50:54.524: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 create -f -' -Jul 29 15:50:55.846: INFO: stderr: "" -Jul 29 15:50:55.846: INFO: stdout: "replicationcontroller/agnhost-primary created\n" -Jul 29 15:50:55.846: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 create -f -' -Jul 29 15:50:57.214: INFO: stderr: "" -Jul 29 15:50:57.214: INFO: stdout: "service/agnhost-primary created\n" -STEP: Waiting for Agnhost primary to start. 07/29/23 15:50:57.214 -Jul 29 15:50:58.234: INFO: Selector matched 1 pods for map[app:agnhost] -Jul 29 15:50:58.234: INFO: Found 1 / 1 -Jul 29 15:50:58.234: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 -Jul 29 15:50:58.240: INFO: Selector matched 1 pods for map[app:agnhost] -Jul 29 15:50:58.240: INFO: ForEach: Found 1 pods from the filter. Now looping through them. 
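(Annotation: the removed sig-cli block that resumes below exercises kubectl end to end rather than the REST API — every create and describe is a real child process against the binary at /usr/local/bin/kubectl, with stdout and stderr captured separately. A minimal sketch of that shell-out pattern; the kubeconfig path here is a placeholder, and the suite's own helpers live under test/e2e/framework:

    package main

    import (
        "fmt"
        "os/exec"
    )

    // runKubectl invokes the kubectl binary the way the suite does:
    // explicit kubeconfig and namespace flags prepended to each call.
    func runKubectl(kubeconfig, namespace string, args ...string) (string, error) {
        base := []string{"--kubeconfig=" + kubeconfig, "--namespace=" + namespace}
        out, err := exec.Command("kubectl", append(base, args...)...).CombinedOutput()
        return string(out), err
    }

    func main() {
        out, err := runKubectl("/tmp/kubeconfig", "kubectl-3903",
            "describe", "rc", "agnhost-primary")
        if err != nil {
            fmt.Println("kubectl failed:", err)
            return
        }
        fmt.Print(out)
    }

End annotation.)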
-Jul 29 15:50:58.241: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe pod agnhost-primary-lwb6h' -Jul 29 15:50:58.406: INFO: stderr: "" -Jul 29 15:50:58.406: INFO: stdout: "Name: agnhost-primary-lwb6h\nNamespace: kubectl-3903\nPriority: 0\nService Account: default\nNode: wetuj3nuajog-3/192.168.121.141\nStart Time: Sat, 29 Jul 2023 15:50:55 +0000\nLabels: app=agnhost\n role=primary\nAnnotations: \nStatus: Running\nIP: 10.233.66.62\nIPs:\n IP: 10.233.66.62\nControlled By: ReplicationController/agnhost-primary\nContainers:\n agnhost-primary:\n Container ID: cri-o://f01e9132568f530aa1b2511730f372ac63979dc4d701b04cbe31e6895464e29c\n Image: registry.k8s.io/e2e-test-images/agnhost:2.43\n Image ID: registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Sat, 29 Jul 2023 15:50:56 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-d6cxv (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n kube-api-access-d6cxv:\n Type: Projected (a volume that contains injected data from multiple sources)\n TokenExpirationSeconds: 3607\n ConfigMapName: kube-root-ca.crt\n ConfigMapOptional: \n DownwardAPI: true\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 3s default-scheduler Successfully assigned kubectl-3903/agnhost-primary-lwb6h to wetuj3nuajog-3\n Normal Pulled 2s kubelet Container image \"registry.k8s.io/e2e-test-images/agnhost:2.43\" already present on machine\n Normal Created 2s kubelet Created container agnhost-primary\n Normal Started 2s kubelet Started container agnhost-primary\n" -Jul 29 15:50:58.406: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe rc agnhost-primary' -Jul 29 15:50:58.615: INFO: stderr: "" -Jul 29 15:50:58.615: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-3903\nSelector: app=agnhost,role=primary\nLabels: app=agnhost\n role=primary\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=agnhost\n role=primary\n Containers:\n agnhost-primary:\n Image: registry.k8s.io/e2e-test-images/agnhost:2.43\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 3s replication-controller Created pod: agnhost-primary-lwb6h\n" -Jul 29 15:50:58.615: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe service agnhost-primary' -Jul 29 15:50:58.825: INFO: stderr: "" -Jul 29 15:50:58.825: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-3903\nLabels: app=agnhost\n role=primary\nAnnotations: \nSelector: app=agnhost,role=primary\nType: ClusterIP\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.233.3.5\nIPs: 10.233.3.5\nPort: 6379/TCP\nTargetPort: agnhost-server/TCP\nEndpoints: 10.233.66.62:6379\nSession Affinity: None\nEvents: \n" -Jul 29 15:50:58.837: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe node wetuj3nuajog-1' -Jul 29 15:50:59.093: INFO: stderr: "" -Jul 29 15:50:59.094: INFO: stdout: "Name: wetuj3nuajog-1\nRoles: control-plane\nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/os=linux\n kubernetes.io/arch=amd64\n kubernetes.io/hostname=wetuj3nuajog-1\n kubernetes.io/os=linux\n node-role.kubernetes.io/control-plane=\n node.kubernetes.io/exclude-from-external-load-balancers=\nAnnotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock\n node.alpha.kubernetes.io/ttl: 0\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Sat, 29 Jul 2023 15:13:38 +0000\nTaints: \nUnschedulable: false\nLease:\n HolderIdentity: wetuj3nuajog-1\n AcquireTime: \n RenewTime: Sat, 29 Jul 2023 15:50:55 +0000\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n NetworkUnavailable False Sat, 29 Jul 2023 15:24:10 +0000 Sat, 29 Jul 2023 15:24:10 +0000 CiliumIsUp Cilium is running on this node\n MemoryPressure False Sat, 29 Jul 2023 15:46:49 +0000 Sat, 29 Jul 2023 15:13:29 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Sat, 29 Jul 2023 15:46:49 +0000 Sat, 29 Jul 2023 15:13:29 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Sat, 29 Jul 2023 15:46:49 +0000 Sat, 29 Jul 2023 15:13:29 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Sat, 29 Jul 2023 15:46:49 +0000 Sat, 29 Jul 2023 15:25:14 +0000 KubeletReady kubelet is posting ready status. AppArmor enabled\nAddresses:\n InternalIP: 192.168.121.120\n Hostname: wetuj3nuajog-1\nCapacity:\n cpu: 2\n ephemeral-storage: 115008636Ki\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 8127904Ki\n pods: 110\n scheduling.k8s.io/foo: 5\nAllocatable:\n cpu: 1600m\n ephemeral-storage: 111880401014\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 3278240Ki\n pods: 110\n scheduling.k8s.io/foo: 5\nSystem Info:\n Machine ID: adcfc172491242be89bb77a77e71b30c\n System UUID: adcfc172-4912-42be-89bb-77a77e71b30c\n Boot ID: 2620ab4a-9ce6-4924-996f-a20a6fbe6041\n Kernel Version: 5.19.0-50-generic\n OS Image: Ubuntu 22.04.2 LTS\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: cri-o://1.26.4\n Kubelet Version: v1.26.7\n Kube-Proxy Version: v1.26.7\nPodCIDR: 10.233.64.0/24\nPodCIDRs: 10.233.64.0/24\nNon-terminated Pods: (10 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age\n --------- ---- ------------ ---------- --------------- ------------- ---\n kube-system cilium-cdv47 100m (6%) 0 (0%) 100Mi (3%) 0 (0%) 27m\n kube-system cilium-node-init-jdrzm 100m (6%) 0 (0%) 100Mi (3%) 0 (0%) 27m\n kube-system coredns-787d4945fb-2xpvx 100m (6%) 0 (0%) 70Mi (2%) 170Mi (5%) 27m\n kube-system coredns-787d4945fb-clg7z 100m (6%) 0 (0%) 70Mi (2%) 170Mi (5%) 27m\n kube-system kube-addon-manager-wetuj3nuajog-1 5m (0%) 0 (0%) 50Mi (1%) 0 (0%) 28m\n kube-system kube-apiserver-wetuj3nuajog-1 250m (15%) 0 (0%) 0 (0%) 0 (0%) 37m\n kube-system kube-controller-manager-wetuj3nuajog-1 200m (12%) 0 (0%) 0 (0%) 0 (0%) 37m\n kube-system kube-proxy-zc9m8 0 (0%) 0 (0%) 0 (0%) 0 (0%) 37m\n kube-system kube-scheduler-wetuj3nuajog-1 100m (6%) 0 (0%) 0 (0%) 0 (0%) 37m\n sonobuoy sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r 0 (0%) 0 (0%) 0 (0%) 0 (0%) 21m\nAllocated resources:\n (Total limits may be over 100 percent, 
i.e., overcommitted.)\n Resource Requests Limits\n -------- -------- ------\n cpu 955m (59%) 0 (0%)\n memory 390Mi (12%) 340Mi (10%)\n ephemeral-storage 0 (0%) 0 (0%)\n hugepages-1Gi 0 (0%) 0 (0%)\n hugepages-2Mi 0 (0%) 0 (0%)\n scheduling.k8s.io/foo 0 0\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Starting 36m kube-proxy \n Normal NodeHasSufficientMemory 37m (x7 over 37m) kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal NodeHasNoDiskPressure 37m (x6 over 37m) kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientPID 37m (x6 over 37m) kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal Starting 37m kubelet Starting kubelet.\n Normal NodeHasSufficientMemory 37m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal NodeHasNoDiskPressure 37m kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientPID 37m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal NodeNotReady 37m kubelet Node wetuj3nuajog-1 status is now: NodeNotReady\n Normal NodeReady 37m kubelet Node wetuj3nuajog-1 status is now: NodeReady\n Normal NodeAllocatableEnforced 37m kubelet Updated Node Allocatable limit across pods\n Normal RegisteredNode 37m node-controller Node wetuj3nuajog-1 event: Registered Node wetuj3nuajog-1 in Controller\n Normal NodeHasNoDiskPressure 36m kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientMemory 36m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal Starting 36m kubelet Starting kubelet.\n Normal NodeHasSufficientPID 36m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal NodeNotReady 36m kubelet Node wetuj3nuajog-1 status is now: NodeNotReady\n Normal NodeAllocatableEnforced 36m kubelet Updated Node Allocatable limit across pods\n Normal NodeReady 36m kubelet Node wetuj3nuajog-1 status is now: NodeReady\n Normal NodeHasNoDiskPressure 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientMemory 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal Starting 25m kubelet Starting kubelet.\n Normal NodeHasSufficientPID 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal NodeNotReady 25m kubelet Node wetuj3nuajog-1 status is now: NodeNotReady\n Normal NodeAllocatableEnforced 25m kubelet Updated Node Allocatable limit across pods\n Normal Starting 25m kubelet Starting kubelet.\n Normal NodeHasSufficientMemory 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal NodeHasNoDiskPressure 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientPID 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal NodeReady 25m kubelet Node wetuj3nuajog-1 status is now: NodeReady\n Normal NodeAllocatableEnforced 25m kubelet Updated Node Allocatable limit across pods\n" -Jul 29 15:50:59.095: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe namespace kubectl-3903' -Jul 29 15:50:59.266: INFO: stderr: "" -Jul 29 15:50:59.266: INFO: stdout: "Name: kubectl-3903\nLabels: e2e-framework=kubectl\n e2e-run=d0d188fc-d094-4f2b-8739-c618e26462b8\n kubernetes.io/metadata.name=kubectl-3903\n pod-security.kubernetes.io/enforce=baseline\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo LimitRange resource.\n" 
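(Annotation: the intra-pod HTTP check that begins below works by indirection — pod IPs are not routable from the test runner, so the suite execs curl inside test-container-pod against agnhost's /dial endpoint on :9080, which in turn dials the target netserver on :8083 and reports back the hostnames that answered. The UDP test earlier in this log drives the same polling loop with `echo hostName | nc -w 1 -u <pod-ip> 8081` instead of curl. A sketch of the /dial request shape; the addresses are placeholders and only reachable in-cluster, which is exactly why the suite tunnels through an exec:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/url"
    )

    // dialCheck asks the proxy pod's agnhost webserver to dial the
    // target pod and report which hostname responded.
    func dialCheck(proxyPodIP, targetIP string) (string, error) {
        u := url.URL{Scheme: "http", Host: proxyPodIP + ":9080", Path: "/dial"}
        u.RawQuery = url.Values{
            "request":  {"hostname"},
            "protocol": {"http"},
            "host":     {targetIP},
            "port":     {"8083"},
            "tries":    {"1"},
        }.Encode()
        resp, err := http.Get(u.String())
        if err != nil {
            return "", err
        }
        defer resp.Body.Close()
        body, err := io.ReadAll(resp.Body)
        return string(body), err
    }

    func main() {
        out, err := dialCheck("10.233.66.45", "10.233.64.60")
        fmt.Println(out, err)
    }

End annotation.)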
-[AfterEach] [sig-cli] Kubectl client +[It] should function for intra-pod communication: http [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:82 +STEP: Performing setup for networking test in namespace pod-network-test-7785 08/24/23 11:48:28.484 +STEP: creating a selector 08/24/23 11:48:28.484 +STEP: Creating the service pods in kubernetes 08/24/23 11:48:28.485 +Aug 24 11:48:28.485: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Aug 24 11:48:28.538: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-7785" to be "running and ready" +Aug 24 11:48:28.562: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 24.050126ms +Aug 24 11:48:28.562: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:48:30.571: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.032510686s +Aug 24 11:48:30.571: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:32.571: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.032401252s +Aug 24 11:48:32.571: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:34.572: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.03419807s +Aug 24 11:48:34.573: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:36.573: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.034500771s +Aug 24 11:48:36.573: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:38.572: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.034163006s +Aug 24 11:48:38.572: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:40.570: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.031581291s +Aug 24 11:48:40.570: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:42.573: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.034929922s +Aug 24 11:48:42.573: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:44.571: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.033036196s +Aug 24 11:48:44.571: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:46.569: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.030866902s +Aug 24 11:48:46.569: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:48.571: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.03285701s +Aug 24 11:48:48.571: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 11:48:50.570: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 22.031853274s +Aug 24 11:48:50.570: INFO: The phase of Pod netserver-0 is Running (Ready = true) +Aug 24 11:48:50.570: INFO: Pod "netserver-0" satisfied condition "running and ready" +Aug 24 11:48:50.575: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-7785" to be "running and ready" +Aug 24 11:48:50.580: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.616506ms +Aug 24 11:48:50.580: INFO: The phase of Pod netserver-1 is Running (Ready = true) +Aug 24 11:48:50.580: INFO: Pod "netserver-1" satisfied condition "running and ready" +Aug 24 11:48:50.588: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-7785" to be "running and ready" +Aug 24 11:48:50.595: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 7.076866ms +Aug 24 11:48:50.595: INFO: The phase of Pod netserver-2 is Running (Ready = true) +Aug 24 11:48:50.596: INFO: Pod "netserver-2" satisfied condition "running and ready" +STEP: Creating test pods 08/24/23 11:48:50.602 +Aug 24 11:48:50.615: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-7785" to be "running" +Aug 24 11:48:50.622: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.830877ms +Aug 24 11:48:52.631: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.015465032s +Aug 24 11:48:52.631: INFO: Pod "test-container-pod" satisfied condition "running" +Aug 24 11:48:52.637: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 +Aug 24 11:48:52.637: INFO: Breadth first check of 10.233.64.60 on host 192.168.121.127... +Aug 24 11:48:52.646: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.45:9080/dial?request=hostname&protocol=http&host=10.233.64.60&port=8083&tries=1'] Namespace:pod-network-test-7785 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:48:52.646: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:48:52.648: INFO: ExecWithOptions: Clientset creation +Aug 24 11:48:52.649: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7785/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.45%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.64.60%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) +Aug 24 11:48:52.853: INFO: Waiting for responses: map[] +Aug 24 11:48:52.853: INFO: reached 10.233.64.60 after 0/1 tries +Aug 24 11:48:52.853: INFO: Breadth first check of 10.233.65.123 on host 192.168.121.111... +Aug 24 11:48:52.860: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.45:9080/dial?request=hostname&protocol=http&host=10.233.65.123&port=8083&tries=1'] Namespace:pod-network-test-7785 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:48:52.860: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:48:52.862: INFO: ExecWithOptions: Clientset creation +Aug 24 11:48:52.863: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7785/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.45%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.65.123%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) +Aug 24 11:48:53.020: INFO: Waiting for responses: map[] +Aug 24 11:48:53.020: INFO: reached 10.233.65.123 after 0/1 tries +Aug 24 11:48:53.021: INFO: Breadth first check of 10.233.66.207 on host 192.168.121.130... 
+Aug 24 11:48:53.028: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.45:9080/dial?request=hostname&protocol=http&host=10.233.66.207&port=8083&tries=1'] Namespace:pod-network-test-7785 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:48:53.028: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:48:53.030: INFO: ExecWithOptions: Clientset creation +Aug 24 11:48:53.030: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7785/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.45%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.66.207%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) +Aug 24 11:48:53.159: INFO: Waiting for responses: map[] +Aug 24 11:48:53.161: INFO: reached 10.233.66.207 after 0/1 tries +Aug 24 11:48:53.161: INFO: Going to retry 0 out of 3 pods.... +[AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 -Jul 29 15:50:59.267: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 11:48:53.161: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Networking test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-3903" for this suite. 07/29/23 15:50:59.283 +STEP: Destroying namespace "pod-network-test-7785" for this suite. 
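(Annotation: every "running and ready" wait in this log follows the same cadence — poll the pod roughly every 2 seconds, give up after 5 minutes, and require Phase=Running plus a true Ready condition. A reduced client-go sketch of that loop; kubeconfig path, namespace, and pod name are placeholders, and unlike the framework's real helper this version does not tolerate transient Get errors:

    package main

    import (
        "context"
        "fmt"
        "time"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    // waitRunningReady polls until the pod is Running with Ready=True,
    // mirroring the 2s interval / 5m timeout seen throughout this log.
    func waitRunningReady(cs kubernetes.Interface, ns, name string) error {
        return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
            pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
            if err != nil {
                return false, err // a production helper would retry transient errors
            }
            if pod.Status.Phase != corev1.PodRunning {
                return false, nil
            }
            for _, c := range pod.Status.Conditions {
                if c.Type == corev1.PodReady {
                    return c.Status == corev1.ConditionTrue, nil
                }
            }
            return false, nil
        })
    }

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(cfg)
        fmt.Println(waitRunningReady(cs, "pod-network-test-7785", "netserver-0"))
    }

End annotation.)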
08/24/23 11:48:53.173 ------------------------------ -• [4.821 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Kubectl describe - test/e2e/kubectl/kubectl.go:1270 - should check if kubectl describe prints relevant information for rc and pods [Conformance] - test/e2e/kubectl/kubectl.go:1276 +• [SLOW TEST] [24.747 seconds] +[sig-network] Networking +test/e2e/common/network/framework.go:23 + Granular Checks: Pods + test/e2e/common/network/networking.go:32 + should function for intra-pod communication: http [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:82 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-network] Networking set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:50:54.48 - Jul 29 15:50:54.480: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 15:50:54.482 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:54.511 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:54.518 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 11:48:28.436 + Aug 24 11:48:28.436: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pod-network-test 08/24/23 11:48:28.441 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:48:28.473 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:48:28.479 + [BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [It] should check if kubectl describe prints relevant information for rc and pods [Conformance] - test/e2e/kubectl/kubectl.go:1276 - Jul 29 15:50:54.524: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 create -f -' - Jul 29 15:50:55.846: INFO: stderr: "" - Jul 29 15:50:55.846: INFO: stdout: "replicationcontroller/agnhost-primary created\n" - Jul 29 15:50:55.846: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 create -f -' - Jul 29 15:50:57.214: INFO: stderr: "" - Jul 29 15:50:57.214: INFO: stdout: "service/agnhost-primary created\n" - STEP: Waiting for Agnhost primary to start. 07/29/23 15:50:57.214 - Jul 29 15:50:58.234: INFO: Selector matched 1 pods for map[app:agnhost] - Jul 29 15:50:58.234: INFO: Found 1 / 1 - Jul 29 15:50:58.234: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 - Jul 29 15:50:58.240: INFO: Selector matched 1 pods for map[app:agnhost] - Jul 29 15:50:58.240: INFO: ForEach: Found 1 pods from the filter. Now looping through them. 
- Jul 29 15:50:58.241: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe pod agnhost-primary-lwb6h' - Jul 29 15:50:58.406: INFO: stderr: "" - Jul 29 15:50:58.406: INFO: stdout: "Name: agnhost-primary-lwb6h\nNamespace: kubectl-3903\nPriority: 0\nService Account: default\nNode: wetuj3nuajog-3/192.168.121.141\nStart Time: Sat, 29 Jul 2023 15:50:55 +0000\nLabels: app=agnhost\n role=primary\nAnnotations: \nStatus: Running\nIP: 10.233.66.62\nIPs:\n IP: 10.233.66.62\nControlled By: ReplicationController/agnhost-primary\nContainers:\n agnhost-primary:\n Container ID: cri-o://f01e9132568f530aa1b2511730f372ac63979dc4d701b04cbe31e6895464e29c\n Image: registry.k8s.io/e2e-test-images/agnhost:2.43\n Image ID: registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Sat, 29 Jul 2023 15:50:56 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-d6cxv (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n kube-api-access-d6cxv:\n Type: Projected (a volume that contains injected data from multiple sources)\n TokenExpirationSeconds: 3607\n ConfigMapName: kube-root-ca.crt\n ConfigMapOptional: \n DownwardAPI: true\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 3s default-scheduler Successfully assigned kubectl-3903/agnhost-primary-lwb6h to wetuj3nuajog-3\n Normal Pulled 2s kubelet Container image \"registry.k8s.io/e2e-test-images/agnhost:2.43\" already present on machine\n Normal Created 2s kubelet Created container agnhost-primary\n Normal Started 2s kubelet Started container agnhost-primary\n" - Jul 29 15:50:58.406: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe rc agnhost-primary' - Jul 29 15:50:58.615: INFO: stderr: "" - Jul 29 15:50:58.615: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-3903\nSelector: app=agnhost,role=primary\nLabels: app=agnhost\n role=primary\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=agnhost\n role=primary\n Containers:\n agnhost-primary:\n Image: registry.k8s.io/e2e-test-images/agnhost:2.43\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 3s replication-controller Created pod: agnhost-primary-lwb6h\n" - Jul 29 15:50:58.615: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe service agnhost-primary' - Jul 29 15:50:58.825: INFO: stderr: "" - Jul 29 15:50:58.825: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-3903\nLabels: app=agnhost\n role=primary\nAnnotations: \nSelector: app=agnhost,role=primary\nType: ClusterIP\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.233.3.5\nIPs: 10.233.3.5\nPort: 6379/TCP\nTargetPort: agnhost-server/TCP\nEndpoints: 10.233.66.62:6379\nSession Affinity: None\nEvents: \n" - Jul 29 15:50:58.837: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe node wetuj3nuajog-1' - Jul 29 15:50:59.093: INFO: stderr: "" - Jul 29 15:50:59.094: INFO: stdout: "Name: wetuj3nuajog-1\nRoles: control-plane\nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/os=linux\n kubernetes.io/arch=amd64\n kubernetes.io/hostname=wetuj3nuajog-1\n kubernetes.io/os=linux\n node-role.kubernetes.io/control-plane=\n node.kubernetes.io/exclude-from-external-load-balancers=\nAnnotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock\n node.alpha.kubernetes.io/ttl: 0\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Sat, 29 Jul 2023 15:13:38 +0000\nTaints: \nUnschedulable: false\nLease:\n HolderIdentity: wetuj3nuajog-1\n AcquireTime: \n RenewTime: Sat, 29 Jul 2023 15:50:55 +0000\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n NetworkUnavailable False Sat, 29 Jul 2023 15:24:10 +0000 Sat, 29 Jul 2023 15:24:10 +0000 CiliumIsUp Cilium is running on this node\n MemoryPressure False Sat, 29 Jul 2023 15:46:49 +0000 Sat, 29 Jul 2023 15:13:29 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Sat, 29 Jul 2023 15:46:49 +0000 Sat, 29 Jul 2023 15:13:29 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Sat, 29 Jul 2023 15:46:49 +0000 Sat, 29 Jul 2023 15:13:29 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Sat, 29 Jul 2023 15:46:49 +0000 Sat, 29 Jul 2023 15:25:14 +0000 KubeletReady kubelet is posting ready status. AppArmor enabled\nAddresses:\n InternalIP: 192.168.121.120\n Hostname: wetuj3nuajog-1\nCapacity:\n cpu: 2\n ephemeral-storage: 115008636Ki\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 8127904Ki\n pods: 110\n scheduling.k8s.io/foo: 5\nAllocatable:\n cpu: 1600m\n ephemeral-storage: 111880401014\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 3278240Ki\n pods: 110\n scheduling.k8s.io/foo: 5\nSystem Info:\n Machine ID: adcfc172491242be89bb77a77e71b30c\n System UUID: adcfc172-4912-42be-89bb-77a77e71b30c\n Boot ID: 2620ab4a-9ce6-4924-996f-a20a6fbe6041\n Kernel Version: 5.19.0-50-generic\n OS Image: Ubuntu 22.04.2 LTS\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: cri-o://1.26.4\n Kubelet Version: v1.26.7\n Kube-Proxy Version: v1.26.7\nPodCIDR: 10.233.64.0/24\nPodCIDRs: 10.233.64.0/24\nNon-terminated Pods: (10 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age\n --------- ---- ------------ ---------- --------------- ------------- ---\n kube-system cilium-cdv47 100m (6%) 0 (0%) 100Mi (3%) 0 (0%) 27m\n kube-system cilium-node-init-jdrzm 100m (6%) 0 (0%) 100Mi (3%) 0 (0%) 27m\n kube-system coredns-787d4945fb-2xpvx 100m (6%) 0 (0%) 70Mi (2%) 170Mi (5%) 27m\n kube-system coredns-787d4945fb-clg7z 100m (6%) 0 (0%) 70Mi (2%) 170Mi (5%) 27m\n kube-system kube-addon-manager-wetuj3nuajog-1 5m (0%) 0 (0%) 50Mi (1%) 0 (0%) 28m\n kube-system kube-apiserver-wetuj3nuajog-1 250m (15%) 0 (0%) 0 (0%) 0 (0%) 37m\n kube-system kube-controller-manager-wetuj3nuajog-1 200m (12%) 0 (0%) 0 (0%) 0 (0%) 37m\n kube-system kube-proxy-zc9m8 0 (0%) 0 (0%) 0 (0%) 0 (0%) 37m\n kube-system kube-scheduler-wetuj3nuajog-1 100m (6%) 0 (0%) 0 (0%) 0 (0%) 37m\n sonobuoy sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r 0 (0%) 0 (0%) 0 (0%) 0 (0%) 21m\nAllocated resources:\n (Total limits may be over 100 percent, 
i.e., overcommitted.)\n Resource Requests Limits\n -------- -------- ------\n cpu 955m (59%) 0 (0%)\n memory 390Mi (12%) 340Mi (10%)\n ephemeral-storage 0 (0%) 0 (0%)\n hugepages-1Gi 0 (0%) 0 (0%)\n hugepages-2Mi 0 (0%) 0 (0%)\n scheduling.k8s.io/foo 0 0\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Starting 36m kube-proxy \n Normal NodeHasSufficientMemory 37m (x7 over 37m) kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal NodeHasNoDiskPressure 37m (x6 over 37m) kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientPID 37m (x6 over 37m) kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal Starting 37m kubelet Starting kubelet.\n Normal NodeHasSufficientMemory 37m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal NodeHasNoDiskPressure 37m kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientPID 37m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal NodeNotReady 37m kubelet Node wetuj3nuajog-1 status is now: NodeNotReady\n Normal NodeReady 37m kubelet Node wetuj3nuajog-1 status is now: NodeReady\n Normal NodeAllocatableEnforced 37m kubelet Updated Node Allocatable limit across pods\n Normal RegisteredNode 37m node-controller Node wetuj3nuajog-1 event: Registered Node wetuj3nuajog-1 in Controller\n Normal NodeHasNoDiskPressure 36m kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientMemory 36m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal Starting 36m kubelet Starting kubelet.\n Normal NodeHasSufficientPID 36m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal NodeNotReady 36m kubelet Node wetuj3nuajog-1 status is now: NodeNotReady\n Normal NodeAllocatableEnforced 36m kubelet Updated Node Allocatable limit across pods\n Normal NodeReady 36m kubelet Node wetuj3nuajog-1 status is now: NodeReady\n Normal NodeHasNoDiskPressure 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientMemory 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal Starting 25m kubelet Starting kubelet.\n Normal NodeHasSufficientPID 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal NodeNotReady 25m kubelet Node wetuj3nuajog-1 status is now: NodeNotReady\n Normal NodeAllocatableEnforced 25m kubelet Updated Node Allocatable limit across pods\n Normal Starting 25m kubelet Starting kubelet.\n Normal NodeHasSufficientMemory 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientMemory\n Normal NodeHasNoDiskPressure 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientPID 25m kubelet Node wetuj3nuajog-1 status is now: NodeHasSufficientPID\n Normal NodeReady 25m kubelet Node wetuj3nuajog-1 status is now: NodeReady\n Normal NodeAllocatableEnforced 25m kubelet Updated Node Allocatable limit across pods\n" - Jul 29 15:50:59.095: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3903 describe namespace kubectl-3903' - Jul 29 15:50:59.266: INFO: stderr: "" - Jul 29 15:50:59.266: INFO: stdout: "Name: kubectl-3903\nLabels: e2e-framework=kubectl\n e2e-run=d0d188fc-d094-4f2b-8739-c618e26462b8\n kubernetes.io/metadata.name=kubectl-3903\n pod-security.kubernetes.io/enforce=baseline\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo LimitRange 
resource.\n" - [AfterEach] [sig-cli] Kubectl client + [It] should function for intra-pod communication: http [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:82 + STEP: Performing setup for networking test in namespace pod-network-test-7785 08/24/23 11:48:28.484 + STEP: creating a selector 08/24/23 11:48:28.484 + STEP: Creating the service pods in kubernetes 08/24/23 11:48:28.485 + Aug 24 11:48:28.485: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable + Aug 24 11:48:28.538: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-7785" to be "running and ready" + Aug 24 11:48:28.562: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 24.050126ms + Aug 24 11:48:28.562: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:48:30.571: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.032510686s + Aug 24 11:48:30.571: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:32.571: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.032401252s + Aug 24 11:48:32.571: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:34.572: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.03419807s + Aug 24 11:48:34.573: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:36.573: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.034500771s + Aug 24 11:48:36.573: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:38.572: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.034163006s + Aug 24 11:48:38.572: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:40.570: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.031581291s + Aug 24 11:48:40.570: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:42.573: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.034929922s + Aug 24 11:48:42.573: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:44.571: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.033036196s + Aug 24 11:48:44.571: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:46.569: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.030866902s + Aug 24 11:48:46.569: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:48.571: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.03285701s + Aug 24 11:48:48.571: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 11:48:50.570: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 22.031853274s + Aug 24 11:48:50.570: INFO: The phase of Pod netserver-0 is Running (Ready = true) + Aug 24 11:48:50.570: INFO: Pod "netserver-0" satisfied condition "running and ready" + Aug 24 11:48:50.575: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-7785" to be "running and ready" + Aug 24 11:48:50.580: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.616506ms + Aug 24 11:48:50.580: INFO: The phase of Pod netserver-1 is Running (Ready = true) + Aug 24 11:48:50.580: INFO: Pod "netserver-1" satisfied condition "running and ready" + Aug 24 11:48:50.588: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-7785" to be "running and ready" + Aug 24 11:48:50.595: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 7.076866ms + Aug 24 11:48:50.595: INFO: The phase of Pod netserver-2 is Running (Ready = true) + Aug 24 11:48:50.596: INFO: Pod "netserver-2" satisfied condition "running and ready" + STEP: Creating test pods 08/24/23 11:48:50.602 + Aug 24 11:48:50.615: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-7785" to be "running" + Aug 24 11:48:50.622: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.830877ms + Aug 24 11:48:52.631: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.015465032s + Aug 24 11:48:52.631: INFO: Pod "test-container-pod" satisfied condition "running" + Aug 24 11:48:52.637: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 + Aug 24 11:48:52.637: INFO: Breadth first check of 10.233.64.60 on host 192.168.121.127... + Aug 24 11:48:52.646: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.45:9080/dial?request=hostname&protocol=http&host=10.233.64.60&port=8083&tries=1'] Namespace:pod-network-test-7785 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:48:52.646: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:48:52.648: INFO: ExecWithOptions: Clientset creation + Aug 24 11:48:52.649: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7785/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.45%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.64.60%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) + Aug 24 11:48:52.853: INFO: Waiting for responses: map[] + Aug 24 11:48:52.853: INFO: reached 10.233.64.60 after 0/1 tries + Aug 24 11:48:52.853: INFO: Breadth first check of 10.233.65.123 on host 192.168.121.111... + Aug 24 11:48:52.860: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.45:9080/dial?request=hostname&protocol=http&host=10.233.65.123&port=8083&tries=1'] Namespace:pod-network-test-7785 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:48:52.860: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:48:52.862: INFO: ExecWithOptions: Clientset creation + Aug 24 11:48:52.863: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7785/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.45%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.65.123%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) + Aug 24 11:48:53.020: INFO: Waiting for responses: map[] + Aug 24 11:48:53.020: INFO: reached 10.233.65.123 after 0/1 tries + Aug 24 11:48:53.021: INFO: Breadth first check of 10.233.66.207 on host 192.168.121.130... 
+ Aug 24 11:48:53.028: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.45:9080/dial?request=hostname&protocol=http&host=10.233.66.207&port=8083&tries=1'] Namespace:pod-network-test-7785 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:48:53.028: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:48:53.030: INFO: ExecWithOptions: Clientset creation + Aug 24 11:48:53.030: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7785/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.45%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.66.207%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) + Aug 24 11:48:53.159: INFO: Waiting for responses: map[] + Aug 24 11:48:53.161: INFO: reached 10.233.66.207 after 0/1 tries + Aug 24 11:48:53.161: INFO: Going to retry 0 out of 3 pods.... + [AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 - Jul 29 15:50:59.267: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 11:48:53.161: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Networking test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-3903" for this suite. 07/29/23 15:50:59.283 + STEP: Destroying namespace "pod-network-test-7785" for this suite. 
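
For reference, the breadth-first probe recorded above can be replayed by hand while the suite's pods are still up (the namespace is destroyed at the end of the spec). The framework simply execs into the test container and asks agnhost's /dial endpoint to fan a request out to a target netserver pod. A minimal sketch, reusing the namespace, pod names, and IPs from this particular run:

    # exec into the webserver container of the test pod and probe netserver-0
    # (10.233.64.60) via the test pod's own agnhost endpoint on :9080
    kubectl --kubeconfig=/tmp/kubeconfig-2729572383 -n pod-network-test-7785 \
      exec test-container-pod -c webserver -- \
      /bin/sh -c "curl -g -q -s 'http://10.233.66.45:9080/dial?request=hostname&protocol=http&host=10.233.64.60&port=8083&tries=1'"

A successful dial answers with a small JSON document whose "responses" list names the pod that replied, which is what the "reached ... after 0/1 tries" lines above are asserting.
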
08/24/23 11:48:53.173 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should be able to deny pod and configmap creation [Conformance] - test/e2e/apimachinery/webhook.go:197 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-node] NoExecuteTaintManager Multiple Pods [Serial] + evicts pods with minTolerationSeconds [Disruptive] [Conformance] + test/e2e/node/taints.go:455 +[BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:50:59.302 -Jul 29 15:50:59.302: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 15:50:59.311 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:59.363 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:59.37 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 11:48:53.188 +Aug 24 11:48:53.188: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename taint-multiple-pods 08/24/23 11:48:53.19 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:48:53.218 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:48:53.221 +[BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 15:50:59.418 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 15:51:00.35 -STEP: Deploying the webhook pod 07/29/23 15:51:00.369 -STEP: Wait for the deployment to be ready 07/29/23 15:51:00.407 -Jul 29 15:51:00.432: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -STEP: Deploying the webhook service 07/29/23 15:51:02.452 -STEP: Verifying the service has paired with the endpoint 07/29/23 15:51:02.476 -Jul 29 15:51:03.476: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should be able to deny pod and configmap creation [Conformance] - test/e2e/apimachinery/webhook.go:197 -STEP: Registering the webhook via the AdmissionRegistration API 07/29/23 15:51:03.497 -STEP: create a pod that should be denied by the webhook 07/29/23 15:51:03.538 -STEP: create a pod that causes the webhook to hang 07/29/23 15:51:03.579 -STEP: create a configmap that should be denied by the webhook 07/29/23 15:51:13.592 -STEP: create a configmap that should be admitted by the webhook 07/29/23 15:51:13.651 -STEP: update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook 07/29/23 15:51:13.671 -STEP: update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook 07/29/23 15:51:13.686 -STEP: create a namespace that bypass the webhook 07/29/23 15:51:13.698 -STEP: create a configmap that violates the webhook policy but is in a whitelisted namespace 07/29/23 15:51:13.711 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + test/e2e/node/taints.go:383 +Aug 24 11:48:53.226: INFO: Waiting up to 1m0s for all nodes to 
be ready +Aug 24 11:49:53.269: INFO: Waiting for terminating namespaces to be deleted... +[It] evicts pods with minTolerationSeconds [Disruptive] [Conformance] + test/e2e/node/taints.go:455 +Aug 24 11:49:53.274: INFO: Starting informer... +STEP: Starting pods... 08/24/23 11:49:53.274 +Aug 24 11:49:53.511: INFO: Pod1 is running on pe9deep4seen-3. Tainting Node +Aug 24 11:49:53.728: INFO: Waiting up to 5m0s for pod "taint-eviction-b1" in namespace "taint-multiple-pods-428" to be "running" +Aug 24 11:49:53.734: INFO: Pod "taint-eviction-b1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.02959ms +Aug 24 11:49:55.743: INFO: Pod "taint-eviction-b1": Phase="Running", Reason="", readiness=true. Elapsed: 2.014328611s +Aug 24 11:49:55.743: INFO: Pod "taint-eviction-b1" satisfied condition "running" +Aug 24 11:49:55.743: INFO: Waiting up to 5m0s for pod "taint-eviction-b2" in namespace "taint-multiple-pods-428" to be "running" +Aug 24 11:49:55.747: INFO: Pod "taint-eviction-b2": Phase="Running", Reason="", readiness=true. Elapsed: 4.708448ms +Aug 24 11:49:55.748: INFO: Pod "taint-eviction-b2" satisfied condition "running" +Aug 24 11:49:55.748: INFO: Pod2 is running on pe9deep4seen-3. Tainting Node +STEP: Trying to apply a taint on the Node 08/24/23 11:49:55.748 +STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 08/24/23 11:49:55.771 +STEP: Waiting for Pod1 and Pod2 to be deleted 08/24/23 11:49:55.778 +Aug 24 11:50:01.712: INFO: Noticed Pod "taint-eviction-b1" gets evicted. +Aug 24 11:50:21.771: INFO: Noticed Pod "taint-eviction-b2" gets evicted. +STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 08/24/23 11:50:21.797 +[AfterEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 15:51:13.781: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 11:50:21.803: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-6273" for this suite. 07/29/23 15:51:13.874 -STEP: Destroying namespace "webhook-6273-markers" for this suite. 07/29/23 15:51:13.903 +STEP: Destroying namespace "taint-multiple-pods-428" for this suite. 
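
The staggered deletions above (taint-eviction-b1 noticed gone roughly 6 s after the taint landed, taint-eviction-b2 roughly 26 s) are the point of the spec: each pod tolerates the NoExecute taint for a different tolerationSeconds, so the taint manager evicts them on different schedules. The same mechanics can be exercised outside the suite; a minimal sketch, assuming the node name and taint key from this run:

    # apply a NoExecute taint; pods that do not tolerate it are evicted
    kubectl taint nodes pe9deep4seen-3 \
      kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute

    # ... later, remove it again (note the trailing '-')
    kubectl taint nodes pe9deep4seen-3 \
      kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute-

A pod that should survive the taint only for a bounded time carries a matching toleration (hypothetical manifest, applied with kubectl apply -f):

    apiVersion: v1
    kind: Pod
    metadata:
      name: taint-demo                  # hypothetical name
    spec:
      containers:
      - name: pause
        image: registry.k8s.io/pause:3.9
      tolerations:
      - key: kubernetes.io/e2e-evict-taint-key
        operator: Equal
        value: evictTaintVal
        effect: NoExecute
        tolerationSeconds: 30           # evicted ~30s after the taint appears
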
08/24/23 11:50:21.814 ------------------------------ -• [SLOW TEST] [14.618 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should be able to deny pod and configmap creation [Conformance] - test/e2e/apimachinery/webhook.go:197 +• [SLOW TEST] [88.640 seconds] +[sig-node] NoExecuteTaintManager Multiple Pods [Serial] +test/e2e/node/framework.go:23 + evicts pods with minTolerationSeconds [Disruptive] [Conformance] + test/e2e/node/taints.go:455 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:50:59.302 - Jul 29 15:50:59.302: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 15:50:59.311 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:50:59.363 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:50:59.37 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 11:48:53.188 + Aug 24 11:48:53.188: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename taint-multiple-pods 08/24/23 11:48:53.19 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:48:53.218 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:48:53.221 + [BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 15:50:59.418 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 15:51:00.35 - STEP: Deploying the webhook pod 07/29/23 15:51:00.369 - STEP: Wait for the deployment to be ready 07/29/23 15:51:00.407 - Jul 29 15:51:00.432: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set - STEP: Deploying the webhook service 07/29/23 15:51:02.452 - STEP: Verifying the service has paired with the endpoint 07/29/23 15:51:02.476 - Jul 29 15:51:03.476: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should be able to deny pod and configmap creation [Conformance] - test/e2e/apimachinery/webhook.go:197 - STEP: Registering the webhook via the AdmissionRegistration API 07/29/23 15:51:03.497 - STEP: create a pod that should be denied by the webhook 07/29/23 15:51:03.538 - STEP: create a pod that causes the webhook to hang 07/29/23 15:51:03.579 - STEP: create a configmap that should be denied by the webhook 07/29/23 15:51:13.592 - STEP: create a configmap that should be admitted by the webhook 07/29/23 15:51:13.651 - STEP: update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook 07/29/23 15:51:13.671 - STEP: update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook 07/29/23 15:51:13.686 - STEP: create a namespace that bypass the webhook 07/29/23 15:51:13.698 - STEP: create a configmap that violates the webhook policy but is in a whitelisted namespace 07/29/23 15:51:13.711 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] 
NoExecuteTaintManager Multiple Pods [Serial] + test/e2e/node/taints.go:383 + Aug 24 11:48:53.226: INFO: Waiting up to 1m0s for all nodes to be ready + Aug 24 11:49:53.269: INFO: Waiting for terminating namespaces to be deleted... + [It] evicts pods with minTolerationSeconds [Disruptive] [Conformance] + test/e2e/node/taints.go:455 + Aug 24 11:49:53.274: INFO: Starting informer... + STEP: Starting pods... 08/24/23 11:49:53.274 + Aug 24 11:49:53.511: INFO: Pod1 is running on pe9deep4seen-3. Tainting Node + Aug 24 11:49:53.728: INFO: Waiting up to 5m0s for pod "taint-eviction-b1" in namespace "taint-multiple-pods-428" to be "running" + Aug 24 11:49:53.734: INFO: Pod "taint-eviction-b1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.02959ms + Aug 24 11:49:55.743: INFO: Pod "taint-eviction-b1": Phase="Running", Reason="", readiness=true. Elapsed: 2.014328611s + Aug 24 11:49:55.743: INFO: Pod "taint-eviction-b1" satisfied condition "running" + Aug 24 11:49:55.743: INFO: Waiting up to 5m0s for pod "taint-eviction-b2" in namespace "taint-multiple-pods-428" to be "running" + Aug 24 11:49:55.747: INFO: Pod "taint-eviction-b2": Phase="Running", Reason="", readiness=true. Elapsed: 4.708448ms + Aug 24 11:49:55.748: INFO: Pod "taint-eviction-b2" satisfied condition "running" + Aug 24 11:49:55.748: INFO: Pod2 is running on pe9deep4seen-3. Tainting Node + STEP: Trying to apply a taint on the Node 08/24/23 11:49:55.748 + STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 08/24/23 11:49:55.771 + STEP: Waiting for Pod1 and Pod2 to be deleted 08/24/23 11:49:55.778 + Aug 24 11:50:01.712: INFO: Noticed Pod "taint-eviction-b1" gets evicted. + Aug 24 11:50:21.771: INFO: Noticed Pod "taint-eviction-b2" gets evicted. + STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 08/24/23 11:50:21.797 + [AfterEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 15:51:13.781: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 11:50:21.803: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-6273" for this suite. 07/29/23 15:51:13.874 - STEP: Destroying namespace "webhook-6273-markers" for this suite. 07/29/23 15:51:13.903 + STEP: Destroying namespace "taint-multiple-pods-428" for this suite. 
08/24/23 11:50:21.814 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:240 -[BeforeEach] [sig-storage] ConfigMap +[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop exec hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:151 +[BeforeEach] [sig-node] Container Lifecycle Hook set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:51:13.927 -Jul 29 15:51:13.927: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 15:51:13.947 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:13.98 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:13.988 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 11:50:21.83 +Aug 24 11:50:21.831: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-lifecycle-hook 08/24/23 11:50:21.833 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:21.866 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:21.87 +[BeforeEach] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:31 -[It] optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:240 -STEP: Creating configMap with name cm-test-opt-del-7731fb57-e8b3-4fc5-8c0e-2f4e20f77009 07/29/23 15:51:14.009 -STEP: Creating configMap with name cm-test-opt-upd-a3bf128b-1baa-45bd-a44b-b176096396b7 07/29/23 15:51:14.02 -STEP: Creating the pod 07/29/23 15:51:14.033 -Jul 29 15:51:14.057: INFO: Waiting up to 5m0s for pod "pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361" in namespace "configmap-1316" to be "running and ready" -Jul 29 15:51:14.064: INFO: Pod "pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361": Phase="Pending", Reason="", readiness=false. Elapsed: 5.856181ms -Jul 29 15:51:14.064: INFO: The phase of Pod pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:51:16.070: INFO: Pod "pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361": Phase="Running", Reason="", readiness=true. Elapsed: 2.012605293s -Jul 29 15:51:16.070: INFO: The phase of Pod pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361 is Running (Ready = true) -Jul 29 15:51:16.071: INFO: Pod "pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361" satisfied condition "running and ready" -STEP: Deleting configmap cm-test-opt-del-7731fb57-e8b3-4fc5-8c0e-2f4e20f77009 07/29/23 15:51:16.113 -STEP: Updating configmap cm-test-opt-upd-a3bf128b-1baa-45bd-a44b-b176096396b7 07/29/23 15:51:16.122 -STEP: Creating configMap with name cm-test-opt-create-d793e65b-f7d6-4743-86d3-330f1e7dc91d 07/29/23 15:51:16.132 -STEP: waiting to observe update in volume 07/29/23 15:51:16.14 -[AfterEach] [sig-storage] ConfigMap +[BeforeEach] when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:77 +STEP: create the container to handle the HTTPGet hook request. 
08/24/23 11:50:21.882 +Aug 24 11:50:21.898: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-4178" to be "running and ready" +Aug 24 11:50:21.904: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 5.802929ms +Aug 24 11:50:21.904: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:50:23.912: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 2.014488162s +Aug 24 11:50:23.913: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) +Aug 24 11:50:23.913: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" +[It] should execute prestop exec hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:151 +STEP: create the pod with lifecycle hook 08/24/23 11:50:23.92 +Aug 24 11:50:23.930: INFO: Waiting up to 5m0s for pod "pod-with-prestop-exec-hook" in namespace "container-lifecycle-hook-4178" to be "running and ready" +Aug 24 11:50:23.936: INFO: Pod "pod-with-prestop-exec-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 6.152224ms +Aug 24 11:50:23.936: INFO: The phase of Pod pod-with-prestop-exec-hook is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:50:25.944: INFO: Pod "pod-with-prestop-exec-hook": Phase="Running", Reason="", readiness=true. Elapsed: 2.013249729s +Aug 24 11:50:25.944: INFO: The phase of Pod pod-with-prestop-exec-hook is Running (Ready = true) +Aug 24 11:50:25.944: INFO: Pod "pod-with-prestop-exec-hook" satisfied condition "running and ready" +STEP: delete the pod with lifecycle hook 08/24/23 11:50:25.949 +Aug 24 11:50:25.962: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Aug 24 11:50:25.968: INFO: Pod pod-with-prestop-exec-hook still exists +Aug 24 11:50:27.969: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Aug 24 11:50:27.977: INFO: Pod pod-with-prestop-exec-hook still exists +Aug 24 11:50:29.968: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Aug 24 11:50:29.977: INFO: Pod pod-with-prestop-exec-hook no longer exists +STEP: check prestop hook 08/24/23 11:50:29.977 +[AfterEach] [sig-node] Container Lifecycle Hook test/e2e/framework/node/init/init.go:32 -Jul 29 15:51:20.216: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 11:50:30.002: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-1316" for this suite. 07/29/23 15:51:20.229 +STEP: Destroying namespace "container-lifecycle-hook-4178" for this suite. 
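
The delete-then-poll exchange above ("still exists" twice, then "no longer exists") is the pod's graceful-termination window: after the delete is issued, the kubelet runs the preStop exec hook, and in this spec the hook calls back to the pod-handle-http-request helper before the container is stopped. The shape of such a pod is compact; a minimal sketch with a hypothetical name and a trivial hook command (the real spec's hook contacts the handler pod instead), applied with kubectl apply -f:

    apiVersion: v1
    kind: Pod
    metadata:
      name: prestop-demo                # hypothetical name
    spec:
      containers:
      - name: main
        image: registry.k8s.io/e2e-test-images/agnhost:2.43
        args: ["pause"]                 # agnhost subcommand that sleeps forever
        lifecycle:
          preStop:
            exec:
              # runs inside the container after deletion is requested,
              # before SIGTERM is delivered
              command: ["/bin/sh", "-c", "echo goodbye"]
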
08/24/23 11:50:30.012 ------------------------------ -• [SLOW TEST] [6.313 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:240 +• [SLOW TEST] [8.195 seconds] +[sig-node] Container Lifecycle Hook +test/e2e/common/node/framework.go:23 + when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:46 + should execute prestop exec hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:151 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-node] Container Lifecycle Hook set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:51:13.927 - Jul 29 15:51:13.927: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 15:51:13.947 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:13.98 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:13.988 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 11:50:21.83 + Aug 24 11:50:21.831: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-lifecycle-hook 08/24/23 11:50:21.833 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:21.866 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:21.87 + [BeforeEach] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:31 - [It] optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:240 - STEP: Creating configMap with name cm-test-opt-del-7731fb57-e8b3-4fc5-8c0e-2f4e20f77009 07/29/23 15:51:14.009 - STEP: Creating configMap with name cm-test-opt-upd-a3bf128b-1baa-45bd-a44b-b176096396b7 07/29/23 15:51:14.02 - STEP: Creating the pod 07/29/23 15:51:14.033 - Jul 29 15:51:14.057: INFO: Waiting up to 5m0s for pod "pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361" in namespace "configmap-1316" to be "running and ready" - Jul 29 15:51:14.064: INFO: Pod "pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361": Phase="Pending", Reason="", readiness=false. Elapsed: 5.856181ms - Jul 29 15:51:14.064: INFO: The phase of Pod pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:51:16.070: INFO: Pod "pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.012605293s - Jul 29 15:51:16.070: INFO: The phase of Pod pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361 is Running (Ready = true) - Jul 29 15:51:16.071: INFO: Pod "pod-configmaps-bff7443e-14ff-47fd-8abc-c3e17021f361" satisfied condition "running and ready" - STEP: Deleting configmap cm-test-opt-del-7731fb57-e8b3-4fc5-8c0e-2f4e20f77009 07/29/23 15:51:16.113 - STEP: Updating configmap cm-test-opt-upd-a3bf128b-1baa-45bd-a44b-b176096396b7 07/29/23 15:51:16.122 - STEP: Creating configMap with name cm-test-opt-create-d793e65b-f7d6-4743-86d3-330f1e7dc91d 07/29/23 15:51:16.132 - STEP: waiting to observe update in volume 07/29/23 15:51:16.14 - [AfterEach] [sig-storage] ConfigMap + [BeforeEach] when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:77 + STEP: create the container to handle the HTTPGet hook request. 08/24/23 11:50:21.882 + Aug 24 11:50:21.898: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-4178" to be "running and ready" + Aug 24 11:50:21.904: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 5.802929ms + Aug 24 11:50:21.904: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:50:23.912: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 2.014488162s + Aug 24 11:50:23.913: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) + Aug 24 11:50:23.913: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" + [It] should execute prestop exec hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:151 + STEP: create the pod with lifecycle hook 08/24/23 11:50:23.92 + Aug 24 11:50:23.930: INFO: Waiting up to 5m0s for pod "pod-with-prestop-exec-hook" in namespace "container-lifecycle-hook-4178" to be "running and ready" + Aug 24 11:50:23.936: INFO: Pod "pod-with-prestop-exec-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 6.152224ms + Aug 24 11:50:23.936: INFO: The phase of Pod pod-with-prestop-exec-hook is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:50:25.944: INFO: Pod "pod-with-prestop-exec-hook": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.013249729s + Aug 24 11:50:25.944: INFO: The phase of Pod pod-with-prestop-exec-hook is Running (Ready = true) + Aug 24 11:50:25.944: INFO: Pod "pod-with-prestop-exec-hook" satisfied condition "running and ready" + STEP: delete the pod with lifecycle hook 08/24/23 11:50:25.949 + Aug 24 11:50:25.962: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear + Aug 24 11:50:25.968: INFO: Pod pod-with-prestop-exec-hook still exists + Aug 24 11:50:27.969: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear + Aug 24 11:50:27.977: INFO: Pod pod-with-prestop-exec-hook still exists + Aug 24 11:50:29.968: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear + Aug 24 11:50:29.977: INFO: Pod pod-with-prestop-exec-hook no longer exists + STEP: check prestop hook 08/24/23 11:50:29.977 + [AfterEach] [sig-node] Container Lifecycle Hook test/e2e/framework/node/init/init.go:32 - Jul 29 15:51:20.216: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 11:50:30.002: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-1316" for this suite. 07/29/23 15:51:20.229 + STEP: Destroying namespace "container-lifecycle-hook-4178" for this suite. 08/24/23 11:50:30.012 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-auth] ServiceAccounts - should update a ServiceAccount [Conformance] - test/e2e/auth/service_accounts.go:810 -[BeforeEach] [sig-auth] ServiceAccounts +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition + creating/deleting custom resource definition objects works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:58 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:51:20.243 -Jul 29 15:51:20.243: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename svcaccounts 07/29/23 15:51:20.246 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:20.271 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:20.275 -[BeforeEach] [sig-auth] ServiceAccounts +STEP: Creating a kubernetes client 08/24/23 11:50:30.028 +Aug 24 11:50:30.028: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 11:50:30.03 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:30.064 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:30.069 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should update a ServiceAccount [Conformance] - test/e2e/auth/service_accounts.go:810 -STEP: Creating ServiceAccount "e2e-sa-jh96v" 07/29/23 15:51:20.28 -Jul 29 15:51:20.287: INFO: 
AutomountServiceAccountToken: false -STEP: Updating ServiceAccount "e2e-sa-jh96v" 07/29/23 15:51:20.287 -Jul 29 15:51:20.299: INFO: AutomountServiceAccountToken: true -[AfterEach] [sig-auth] ServiceAccounts +[It] creating/deleting custom resource definition objects works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:58 +Aug 24 11:50:30.072: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 15:51:20.300: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +Aug 24 11:50:31.113: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "svcaccounts-7950" for this suite. 07/29/23 15:51:20.308 +STEP: Destroying namespace "custom-resource-definition-7262" for this suite. 08/24/23 11:50:31.137 ------------------------------ -• [0.082 seconds] -[sig-auth] ServiceAccounts -test/e2e/auth/framework.go:23 - should update a ServiceAccount [Conformance] - test/e2e/auth/service_accounts.go:810 +• [1.123 seconds] +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + Simple CustomResourceDefinition + test/e2e/apimachinery/custom_resource_definition.go:50 + creating/deleting custom resource definition objects works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:58 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-auth] ServiceAccounts + [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:51:20.243 - Jul 29 15:51:20.243: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename svcaccounts 07/29/23 15:51:20.246 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:20.271 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:20.275 - [BeforeEach] [sig-auth] ServiceAccounts + STEP: Creating a kubernetes client 08/24/23 11:50:30.028 + Aug 24 11:50:30.028: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 11:50:30.03 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:30.064 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:30.069 + [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should update a ServiceAccount [Conformance] - test/e2e/auth/service_accounts.go:810 - STEP: Creating ServiceAccount "e2e-sa-jh96v" 07/29/23 15:51:20.28 - Jul 29 15:51:20.287: INFO: AutomountServiceAccountToken: false - STEP: Updating ServiceAccount "e2e-sa-jh96v" 07/29/23 15:51:20.287 - Jul 29 
15:51:20.299: INFO: AutomountServiceAccountToken: true - [AfterEach] [sig-auth] ServiceAccounts + [It] creating/deleting custom resource definition objects works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:58 + Aug 24 11:50:30.072: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 15:51:20.300: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + Aug 24 11:50:31.113: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "svcaccounts-7950" for this suite. 07/29/23 15:51:20.308 + STEP: Destroying namespace "custom-resource-definition-7262" for this suite. 08/24/23 11:50:31.137 << End Captured GinkgoWriter Output ------------------------------ -[sig-node] InitContainer [NodeConformance] - should invoke init containers on a RestartAlways pod [Conformance] - test/e2e/common/node/init_container.go:255 -[BeforeEach] [sig-node] InitContainer [NodeConformance] +SSSSSSSSSS +------------------------------ +[sig-network] HostPort + validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] + test/e2e/network/hostport.go:63 +[BeforeEach] [sig-network] HostPort set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:51:20.326 -Jul 29 15:51:20.326: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename init-container 07/29/23 15:51:20.33 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:20.356 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:20.361 -[BeforeEach] [sig-node] InitContainer [NodeConformance] +STEP: Creating a kubernetes client 08/24/23 11:50:31.156 +Aug 24 11:50:31.157: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename hostport 08/24/23 11:50:31.16 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:31.194 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:31.198 +[BeforeEach] [sig-network] HostPort test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] InitContainer [NodeConformance] - test/e2e/common/node/init_container.go:165 -[It] should invoke init containers on a RestartAlways pod [Conformance] - test/e2e/common/node/init_container.go:255 -STEP: creating the pod 07/29/23 15:51:20.366 -Jul 29 15:51:20.366: INFO: PodSpec: initContainers in spec.initContainers -[AfterEach] [sig-node] InitContainer [NodeConformance] +[BeforeEach] [sig-network] HostPort + test/e2e/network/hostport.go:49 +[It] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] + test/e2e/network/hostport.go:63 +STEP: Trying to 
create a pod(pod1) with hostport 54323 and hostIP 127.0.0.1 and expect scheduled 08/24/23 11:50:31.215 +Aug 24 11:50:31.236: INFO: Waiting up to 5m0s for pod "pod1" in namespace "hostport-9701" to be "running and ready" +Aug 24 11:50:31.257: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 20.631154ms +Aug 24 11:50:31.257: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:50:33.265: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.02951089s +Aug 24 11:50:33.266: INFO: The phase of Pod pod1 is Running (Ready = true) +Aug 24 11:50:33.266: INFO: Pod "pod1" satisfied condition "running and ready" +STEP: Trying to create another pod(pod2) with hostport 54323 but hostIP 192.168.121.127 on the node which pod1 resides and expect scheduled 08/24/23 11:50:33.266 +Aug 24 11:50:33.276: INFO: Waiting up to 5m0s for pod "pod2" in namespace "hostport-9701" to be "running and ready" +Aug 24 11:50:33.282: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 6.215385ms +Aug 24 11:50:33.282: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:50:35.293: INFO: Pod "pod2": Phase="Running", Reason="", readiness=false. Elapsed: 2.017284722s +Aug 24 11:50:35.293: INFO: The phase of Pod pod2 is Running (Ready = false) +Aug 24 11:50:37.290: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. Elapsed: 4.014333805s +Aug 24 11:50:37.290: INFO: The phase of Pod pod2 is Running (Ready = true) +Aug 24 11:50:37.290: INFO: Pod "pod2" satisfied condition "running and ready" +STEP: Trying to create a third pod(pod3) with hostport 54323, hostIP 192.168.121.127 but use UDP protocol on the node which pod2 resides 08/24/23 11:50:37.291 +Aug 24 11:50:37.302: INFO: Waiting up to 5m0s for pod "pod3" in namespace "hostport-9701" to be "running and ready" +Aug 24 11:50:37.318: INFO: Pod "pod3": Phase="Pending", Reason="", readiness=false. Elapsed: 15.665067ms +Aug 24 11:50:37.318: INFO: The phase of Pod pod3 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:50:39.327: INFO: Pod "pod3": Phase="Running", Reason="", readiness=true. Elapsed: 2.024230923s +Aug 24 11:50:39.327: INFO: The phase of Pod pod3 is Running (Ready = true) +Aug 24 11:50:39.327: INFO: Pod "pod3" satisfied condition "running and ready" +Aug 24 11:50:39.337: INFO: Waiting up to 5m0s for pod "e2e-host-exec" in namespace "hostport-9701" to be "running and ready" +Aug 24 11:50:39.344: INFO: Pod "e2e-host-exec": Phase="Pending", Reason="", readiness=false. Elapsed: 6.223517ms +Aug 24 11:50:39.344: INFO: The phase of Pod e2e-host-exec is Pending, waiting for it to be Running (with Ready = true) +Aug 24 11:50:41.351: INFO: Pod "e2e-host-exec": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.014027218s +Aug 24 11:50:41.351: INFO: The phase of Pod e2e-host-exec is Running (Ready = true) +Aug 24 11:50:41.351: INFO: Pod "e2e-host-exec" satisfied condition "running and ready" +STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54323 08/24/23 11:50:41.358 +Aug 24 11:50:41.358: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 192.168.121.127 http://127.0.0.1:54323/hostname] Namespace:hostport-9701 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:50:41.358: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:50:41.359: INFO: ExecWithOptions: Clientset creation +Aug 24 11:50:41.360: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-9701/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+--connect-timeout+5+--interface+192.168.121.127+http%3A%2F%2F127.0.0.1%3A54323%2Fhostname&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) +STEP: checking connectivity from pod e2e-host-exec to serverIP: 192.168.121.127, port: 54323 08/24/23 11:50:41.539 +Aug 24 11:50:41.539: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://192.168.121.127:54323/hostname] Namespace:hostport-9701 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:50:41.540: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:50:41.541: INFO: ExecWithOptions: Clientset creation +Aug 24 11:50:41.541: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-9701/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+--connect-timeout+5+http%3A%2F%2F192.168.121.127%3A54323%2Fhostname&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) +STEP: checking connectivity from pod e2e-host-exec to serverIP: 192.168.121.127, port: 54323 UDP 08/24/23 11:50:41.681 +Aug 24 11:50:41.682: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostname | nc -u -w 5 192.168.121.127 54323] Namespace:hostport-9701 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:50:41.682: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:50:41.685: INFO: ExecWithOptions: Clientset creation +Aug 24 11:50:41.685: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-9701/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostname+%7C+nc+-u+-w+5+192.168.121.127+54323&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) +[AfterEach] [sig-network] HostPort test/e2e/framework/node/init/init.go:32 -Jul 29 15:51:23.991: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +Aug 24 11:50:46.805: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] HostPort test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +[DeferCleanup (Each)] [sig-network] HostPort dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +[DeferCleanup (Each)] [sig-network] HostPort tear down framework | framework.go:193 -STEP: Destroying namespace "init-container-6839" for this suite. 
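
Underlying the pod1/pod2/pod3 setup in this spec is the rule being validated: two pods conflict on a hostPort only when hostIP and protocol also match, so all three pods can hold port 54323 on the same node at once. The distinguishing fields live on the container port; a minimal sketch with a hypothetical pod name, applied with kubectl apply -f:

    apiVersion: v1
    kind: Pod
    metadata:
      name: hostport-demo               # hypothetical name
    spec:
      containers:
      - name: agnhost
        image: registry.k8s.io/e2e-test-images/agnhost:2.43
        args: ["netexec", "--http-port=8080"]
        ports:
        - containerPort: 8080
          hostPort: 54323               # shared with the other pods...
          hostIP: 127.0.0.1             # ...but bound to a different address
          protocol: TCP                 # ...or distinguished by TCP vs UDP
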
07/29/23 15:51:24.001 +STEP: Destroying namespace "hostport-9701" for this suite. 08/24/23 11:50:46.818 ------------------------------ -• [3.685 seconds] -[sig-node] InitContainer [NodeConformance] -test/e2e/common/node/framework.go:23 - should invoke init containers on a RestartAlways pod [Conformance] - test/e2e/common/node/init_container.go:255 +• [SLOW TEST] [15.672 seconds] +[sig-network] HostPort +test/e2e/network/common/framework.go:23 + validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] + test/e2e/network/hostport.go:63 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] InitContainer [NodeConformance] + [BeforeEach] [sig-network] HostPort set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:51:20.326 - Jul 29 15:51:20.326: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename init-container 07/29/23 15:51:20.33 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:20.356 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:20.361 - [BeforeEach] [sig-node] InitContainer [NodeConformance] + STEP: Creating a kubernetes client 08/24/23 11:50:31.156 + Aug 24 11:50:31.157: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename hostport 08/24/23 11:50:31.16 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:31.194 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:31.198 + [BeforeEach] [sig-network] HostPort test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] InitContainer [NodeConformance] - test/e2e/common/node/init_container.go:165 - [It] should invoke init containers on a RestartAlways pod [Conformance] - test/e2e/common/node/init_container.go:255 - STEP: creating the pod 07/29/23 15:51:20.366 - Jul 29 15:51:20.366: INFO: PodSpec: initContainers in spec.initContainers - [AfterEach] [sig-node] InitContainer [NodeConformance] + [BeforeEach] [sig-network] HostPort + test/e2e/network/hostport.go:49 + [It] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] + test/e2e/network/hostport.go:63 + STEP: Trying to create a pod(pod1) with hostport 54323 and hostIP 127.0.0.1 and expect scheduled 08/24/23 11:50:31.215 + Aug 24 11:50:31.236: INFO: Waiting up to 5m0s for pod "pod1" in namespace "hostport-9701" to be "running and ready" + Aug 24 11:50:31.257: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 20.631154ms + Aug 24 11:50:31.257: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:50:33.265: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.02951089s + Aug 24 11:50:33.266: INFO: The phase of Pod pod1 is Running (Ready = true) + Aug 24 11:50:33.266: INFO: Pod "pod1" satisfied condition "running and ready" + STEP: Trying to create another pod(pod2) with hostport 54323 but hostIP 192.168.121.127 on the node which pod1 resides and expect scheduled 08/24/23 11:50:33.266 + Aug 24 11:50:33.276: INFO: Waiting up to 5m0s for pod "pod2" in namespace "hostport-9701" to be "running and ready" + Aug 24 11:50:33.282: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 6.215385ms + Aug 24 11:50:33.282: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:50:35.293: INFO: Pod "pod2": Phase="Running", Reason="", readiness=false. Elapsed: 2.017284722s + Aug 24 11:50:35.293: INFO: The phase of Pod pod2 is Running (Ready = false) + Aug 24 11:50:37.290: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. Elapsed: 4.014333805s + Aug 24 11:50:37.290: INFO: The phase of Pod pod2 is Running (Ready = true) + Aug 24 11:50:37.290: INFO: Pod "pod2" satisfied condition "running and ready" + STEP: Trying to create a third pod(pod3) with hostport 54323, hostIP 192.168.121.127 but use UDP protocol on the node which pod2 resides 08/24/23 11:50:37.291 + Aug 24 11:50:37.302: INFO: Waiting up to 5m0s for pod "pod3" in namespace "hostport-9701" to be "running and ready" + Aug 24 11:50:37.318: INFO: Pod "pod3": Phase="Pending", Reason="", readiness=false. Elapsed: 15.665067ms + Aug 24 11:50:37.318: INFO: The phase of Pod pod3 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:50:39.327: INFO: Pod "pod3": Phase="Running", Reason="", readiness=true. Elapsed: 2.024230923s + Aug 24 11:50:39.327: INFO: The phase of Pod pod3 is Running (Ready = true) + Aug 24 11:50:39.327: INFO: Pod "pod3" satisfied condition "running and ready" + Aug 24 11:50:39.337: INFO: Waiting up to 5m0s for pod "e2e-host-exec" in namespace "hostport-9701" to be "running and ready" + Aug 24 11:50:39.344: INFO: Pod "e2e-host-exec": Phase="Pending", Reason="", readiness=false. Elapsed: 6.223517ms + Aug 24 11:50:39.344: INFO: The phase of Pod e2e-host-exec is Pending, waiting for it to be Running (with Ready = true) + Aug 24 11:50:41.351: INFO: Pod "e2e-host-exec": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.014027218s + Aug 24 11:50:41.351: INFO: The phase of Pod e2e-host-exec is Running (Ready = true) + Aug 24 11:50:41.351: INFO: Pod "e2e-host-exec" satisfied condition "running and ready" + STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54323 08/24/23 11:50:41.358 + Aug 24 11:50:41.358: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 192.168.121.127 http://127.0.0.1:54323/hostname] Namespace:hostport-9701 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:50:41.358: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:50:41.359: INFO: ExecWithOptions: Clientset creation + Aug 24 11:50:41.360: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-9701/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+--connect-timeout+5+--interface+192.168.121.127+http%3A%2F%2F127.0.0.1%3A54323%2Fhostname&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) + STEP: checking connectivity from pod e2e-host-exec to serverIP: 192.168.121.127, port: 54323 08/24/23 11:50:41.539 + Aug 24 11:50:41.539: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://192.168.121.127:54323/hostname] Namespace:hostport-9701 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:50:41.540: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:50:41.541: INFO: ExecWithOptions: Clientset creation + Aug 24 11:50:41.541: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-9701/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+--connect-timeout+5+http%3A%2F%2F192.168.121.127%3A54323%2Fhostname&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) + STEP: checking connectivity from pod e2e-host-exec to serverIP: 192.168.121.127, port: 54323 UDP 08/24/23 11:50:41.681 + Aug 24 11:50:41.682: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostname | nc -u -w 5 192.168.121.127 54323] Namespace:hostport-9701 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:50:41.682: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:50:41.685: INFO: ExecWithOptions: Clientset creation + Aug 24 11:50:41.685: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-9701/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostname+%7C+nc+-u+-w+5+192.168.121.127+54323&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) + [AfterEach] [sig-network] HostPort test/e2e/framework/node/init/init.go:32 - Jul 29 15:51:23.991: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + Aug 24 11:50:46.805: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] HostPort test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + [DeferCleanup (Each)] [sig-network] HostPort dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + [DeferCleanup (Each)] [sig-network] HostPort tear down framework | framework.go:193 - STEP: Destroying namespace 
"init-container-6839" for this suite. 07/29/23 15:51:24.001 + STEP: Destroying namespace "hostport-9701" for this suite. 08/24/23 11:50:46.818 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] DisruptionController - should block an eviction until the PDB is updated to allow it [Conformance] - test/e2e/apps/disruption.go:347 -[BeforeEach] [sig-apps] DisruptionController +[sig-apps] Job + should apply changes to a job status [Conformance] + test/e2e/apps/job.go:636 +[BeforeEach] [sig-apps] Job set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:51:24.022 -Jul 29 15:51:24.022: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename disruption 07/29/23 15:51:24.024 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:24.058 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:24.064 -[BeforeEach] [sig-apps] DisruptionController +STEP: Creating a kubernetes client 08/24/23 11:50:46.833 +Aug 24 11:50:46.833: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename job 08/24/23 11:50:46.835 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:46.869 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:46.878 +[BeforeEach] [sig-apps] Job test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] DisruptionController - test/e2e/apps/disruption.go:72 -[It] should block an eviction until the PDB is updated to allow it [Conformance] - test/e2e/apps/disruption.go:347 -STEP: Creating a pdb that targets all three pods in a test replica set 07/29/23 15:51:24.068 -STEP: Waiting for the pdb to be processed 07/29/23 15:51:24.076 -STEP: First trying to evict a pod which shouldn't be evictable 07/29/23 15:51:26.098 -STEP: Waiting for all pods to be running 07/29/23 15:51:26.098 -Jul 29 15:51:26.108: INFO: pods: 0 < 3 -STEP: locating a running pod 07/29/23 15:51:28.116 -STEP: Updating the pdb to allow a pod to be evicted 07/29/23 15:51:28.131 -STEP: Waiting for the pdb to be processed 07/29/23 15:51:28.142 -STEP: Trying to evict the same pod we tried earlier which should now be evictable 07/29/23 15:51:30.158 -STEP: Waiting for all pods to be running 07/29/23 15:51:30.158 -STEP: Waiting for the pdb to observed all healthy pods 07/29/23 15:51:30.164 -STEP: Patching the pdb to disallow a pod to be evicted 07/29/23 15:51:30.219 -STEP: Waiting for the pdb to be processed 07/29/23 15:51:30.329 -STEP: Waiting for all pods to be running 07/29/23 15:51:32.353 -STEP: locating a running pod 07/29/23 15:51:32.363 -STEP: Deleting the pdb to allow a pod to be evicted 07/29/23 15:51:32.383 -STEP: Waiting for the pdb to be deleted 07/29/23 15:51:32.4 -STEP: Trying to evict the same pod we tried earlier which should now be evictable 07/29/23 15:51:32.41 -STEP: Waiting for all pods to be running 07/29/23 15:51:32.411 -[AfterEach] [sig-apps] DisruptionController +[It] should apply changes to a job status [Conformance] + test/e2e/apps/job.go:636 +STEP: Creating a job 08/24/23 11:50:46.884 +STEP: Ensure pods equal to parallelism count is attached to the job 08/24/23 11:50:46.897 +STEP: patching /status 08/24/23 11:50:48.908 +STEP: updating /status 08/24/23 11:50:48.926 +STEP: get /status 08/24/23 11:50:48.97 +[AfterEach] [sig-apps] Job 
test/e2e/framework/node/init/init.go:32 -Jul 29 15:51:32.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] DisruptionController +Aug 24 11:50:48.977: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Job test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] DisruptionController +[DeferCleanup (Each)] [sig-apps] Job dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] DisruptionController +[DeferCleanup (Each)] [sig-apps] Job tear down framework | framework.go:193 -STEP: Destroying namespace "disruption-9968" for this suite. 07/29/23 15:51:32.479 +STEP: Destroying namespace "job-5732" for this suite. 08/24/23 11:50:48.99 ------------------------------ -• [SLOW TEST] [8.478 seconds] -[sig-apps] DisruptionController +• [2.172 seconds] +[sig-apps] Job test/e2e/apps/framework.go:23 - should block an eviction until the PDB is updated to allow it [Conformance] - test/e2e/apps/disruption.go:347 + should apply changes to a job status [Conformance] + test/e2e/apps/job.go:636 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] DisruptionController + [BeforeEach] [sig-apps] Job set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:51:24.022 - Jul 29 15:51:24.022: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename disruption 07/29/23 15:51:24.024 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:24.058 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:24.064 - [BeforeEach] [sig-apps] DisruptionController + STEP: Creating a kubernetes client 08/24/23 11:50:46.833 + Aug 24 11:50:46.833: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename job 08/24/23 11:50:46.835 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:46.869 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:46.878 + [BeforeEach] [sig-apps] Job test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] DisruptionController - test/e2e/apps/disruption.go:72 - [It] should block an eviction until the PDB is updated to allow it [Conformance] - test/e2e/apps/disruption.go:347 - STEP: Creating a pdb that targets all three pods in a test replica set 07/29/23 15:51:24.068 - STEP: Waiting for the pdb to be processed 07/29/23 15:51:24.076 - STEP: First trying to evict a pod which shouldn't be evictable 07/29/23 15:51:26.098 - STEP: Waiting for all pods to be running 07/29/23 15:51:26.098 - Jul 29 15:51:26.108: INFO: pods: 0 < 3 - STEP: locating a running pod 07/29/23 15:51:28.116 - STEP: Updating the pdb to allow a pod to be evicted 07/29/23 15:51:28.131 - STEP: Waiting for the pdb to be processed 07/29/23 15:51:28.142 - STEP: Trying to evict the same pod we tried earlier which should now be evictable 07/29/23 15:51:30.158 - STEP: Waiting for all pods to be running 07/29/23 15:51:30.158 - STEP: Waiting for the pdb to observed all healthy pods 07/29/23 15:51:30.164 - STEP: Patching the pdb to disallow a pod to be evicted 07/29/23 15:51:30.219 - STEP: Waiting for the pdb to be processed 07/29/23 15:51:30.329 - STEP: Waiting for all pods to be running 07/29/23 15:51:32.353 - STEP: locating a running pod 07/29/23 15:51:32.363 - STEP: Deleting the pdb to allow a pod to be evicted 07/29/23 15:51:32.383 - STEP: Waiting for the pdb to be 
deleted 07/29/23 15:51:32.4 - STEP: Trying to evict the same pod we tried earlier which should now be evictable 07/29/23 15:51:32.41 - STEP: Waiting for all pods to be running 07/29/23 15:51:32.411 - [AfterEach] [sig-apps] DisruptionController + [It] should apply changes to a job status [Conformance] + test/e2e/apps/job.go:636 + STEP: Creating a job 08/24/23 11:50:46.884 + STEP: Ensure pods equal to parallelism count is attached to the job 08/24/23 11:50:46.897 + STEP: patching /status 08/24/23 11:50:48.908 + STEP: updating /status 08/24/23 11:50:48.926 + STEP: get /status 08/24/23 11:50:48.97 + [AfterEach] [sig-apps] Job test/e2e/framework/node/init/init.go:32 - Jul 29 15:51:32.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] DisruptionController + Aug 24 11:50:48.977: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Job test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] DisruptionController + [DeferCleanup (Each)] [sig-apps] Job dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] DisruptionController + [DeferCleanup (Each)] [sig-apps] Job tear down framework | framework.go:193 - STEP: Destroying namespace "disruption-9968" for this suite. 07/29/23 15:51:32.479 + STEP: Destroying namespace "job-5732" for this suite. 08/24/23 11:50:48.99 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-network] Services - should provide secure master service [Conformance] - test/e2e/network/service.go:777 + should be able to change the type from ExternalName to ClusterIP [Conformance] + test/e2e/network/service.go:1438 [BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:51:32.502 -Jul 29 15:51:32.502: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 15:51:32.506 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:32.558 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:32.564 +STEP: Creating a kubernetes client 08/24/23 11:50:49.015 +Aug 24 11:50:49.015: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 11:50:49.016 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:49.046 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:49.051 [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] Services test/e2e/network/service.go:766 -[It] should provide secure master service [Conformance] - test/e2e/network/service.go:777 +[It] should be able to change the type from ExternalName to ClusterIP [Conformance] + test/e2e/network/service.go:1438 +STEP: creating a service externalname-service with the type=ExternalName in namespace services-5111 08/24/23 11:50:49.057 +STEP: changing the ExternalName service to type=ClusterIP 08/24/23 11:50:49.071 +STEP: creating replication controller externalname-service in namespace services-5111 08/24/23 11:50:49.101 +I0824 11:50:49.114731 14 runners.go:193] Created replication controller with name: externalname-service, namespace: services-5111, replica count: 2 +I0824 11:50:52.166255 14 runners.go:193] externalname-service Pods: 2 out of 2 created, 2 
running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Aug 24 11:50:52.166: INFO: Creating new exec pod +Aug 24 11:50:52.182: INFO: Waiting up to 5m0s for pod "execpodllvn6" in namespace "services-5111" to be "running" +Aug 24 11:50:52.189: INFO: Pod "execpodllvn6": Phase="Pending", Reason="", readiness=false. Elapsed: 6.200063ms +Aug 24 11:50:54.198: INFO: Pod "execpodllvn6": Phase="Running", Reason="", readiness=true. Elapsed: 2.01522407s +Aug 24 11:50:54.198: INFO: Pod "execpodllvn6" satisfied condition "running" +Aug 24 11:50:55.200: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-5111 exec execpodllvn6 -- /bin/sh -x -c nc -v -z -w 2 externalname-service 80' +Aug 24 11:50:55.513: INFO: stderr: "+ nc -v -z -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" +Aug 24 11:50:55.513: INFO: stdout: "" +Aug 24 11:50:55.513: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-5111 exec execpodllvn6 -- /bin/sh -x -c nc -v -z -w 2 10.233.53.209 80' +Aug 24 11:50:55.746: INFO: stderr: "+ nc -v -z -w 2 10.233.53.209 80\nConnection to 10.233.53.209 80 port [tcp/http] succeeded!\n" +Aug 24 11:50:55.746: INFO: stdout: "" +Aug 24 11:50:55.746: INFO: Cleaning up the ExternalName to ClusterIP test service [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 15:51:32.574: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 11:50:55.782: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "services-1858" for this suite. 07/29/23 15:51:32.581 +STEP: Destroying namespace "services-5111" for this suite. 
08/24/23 11:50:55.795 ------------------------------ -• [0.092 seconds] +• [SLOW TEST] [6.796 seconds] [sig-network] Services test/e2e/network/common/framework.go:23 - should provide secure master service [Conformance] - test/e2e/network/service.go:777 + should be able to change the type from ExternalName to ClusterIP [Conformance] + test/e2e/network/service.go:1438 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:51:32.502 - Jul 29 15:51:32.502: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 15:51:32.506 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:32.558 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:32.564 + STEP: Creating a kubernetes client 08/24/23 11:50:49.015 + Aug 24 11:50:49.015: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 11:50:49.016 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:49.046 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:49.051 [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] Services test/e2e/network/service.go:766 - [It] should provide secure master service [Conformance] - test/e2e/network/service.go:777 + [It] should be able to change the type from ExternalName to ClusterIP [Conformance] + test/e2e/network/service.go:1438 + STEP: creating a service externalname-service with the type=ExternalName in namespace services-5111 08/24/23 11:50:49.057 + STEP: changing the ExternalName service to type=ClusterIP 08/24/23 11:50:49.071 + STEP: creating replication controller externalname-service in namespace services-5111 08/24/23 11:50:49.101 + I0824 11:50:49.114731 14 runners.go:193] Created replication controller with name: externalname-service, namespace: services-5111, replica count: 2 + I0824 11:50:52.166255 14 runners.go:193] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + Aug 24 11:50:52.166: INFO: Creating new exec pod + Aug 24 11:50:52.182: INFO: Waiting up to 5m0s for pod "execpodllvn6" in namespace "services-5111" to be "running" + Aug 24 11:50:52.189: INFO: Pod "execpodllvn6": Phase="Pending", Reason="", readiness=false. Elapsed: 6.200063ms + Aug 24 11:50:54.198: INFO: Pod "execpodllvn6": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01522407s + Aug 24 11:50:54.198: INFO: Pod "execpodllvn6" satisfied condition "running" + Aug 24 11:50:55.200: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-5111 exec execpodllvn6 -- /bin/sh -x -c nc -v -z -w 2 externalname-service 80' + Aug 24 11:50:55.513: INFO: stderr: "+ nc -v -z -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" + Aug 24 11:50:55.513: INFO: stdout: "" + Aug 24 11:50:55.513: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-5111 exec execpodllvn6 -- /bin/sh -x -c nc -v -z -w 2 10.233.53.209 80' + Aug 24 11:50:55.746: INFO: stderr: "+ nc -v -z -w 2 10.233.53.209 80\nConnection to 10.233.53.209 80 port [tcp/http] succeeded!\n" + Aug 24 11:50:55.746: INFO: stdout: "" + Aug 24 11:50:55.746: INFO: Cleaning up the ExternalName to ClusterIP test service [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 15:51:32.574: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 11:50:55.782: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "services-1858" for this suite. 07/29/23 15:51:32.581 + STEP: Destroying namespace "services-5111" for this suite. 08/24/23 11:50:55.795 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook - should execute prestop http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:212 -[BeforeEach] [sig-node] Container Lifecycle Hook +[sig-apps] DisruptionController + should block an eviction until the PDB is updated to allow it [Conformance] + test/e2e/apps/disruption.go:347 +[BeforeEach] [sig-apps] DisruptionController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:51:32.597 -Jul 29 15:51:32.598: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-lifecycle-hook 07/29/23 15:51:32.6 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:32.626 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:32.63 -[BeforeEach] [sig-node] Container Lifecycle Hook +STEP: Creating a kubernetes client 08/24/23 11:50:55.813 +Aug 24 11:50:55.813: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename disruption 08/24/23 11:50:55.818 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:55.853 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:55.86 +[BeforeEach] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] when create a pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:77 -STEP: create the container to handle the HTTPGet hook request. 
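The [sig-network] Services block above flips a Service from type=ExternalName to type=ClusterIP, then proves reachability with nc from an exec pod against both the service name and the allocated cluster IP (10.233.53.209 in the log). A sketch of the type change with client-go, assuming an initialized clientset; the alias target and backend port are assumptions, while the service name and port 80 come from the log.

    package main

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
        "k8s.io/client-go/kubernetes"
    )

    func externalNameToClusterIP(ctx context.Context, cs kubernetes.Interface, ns string) (*corev1.Service, error) {
        svcs := cs.CoreV1().Services(ns)

        // Start as ExternalName: a pure DNS alias — no cluster IP, no ports.
        svc, err := svcs.Create(ctx, &corev1.Service{
            ObjectMeta: metav1.ObjectMeta{Name: "externalname-service"},
            Spec: corev1.ServiceSpec{
                Type:         corev1.ServiceTypeExternalName,
                ExternalName: "foo.example.com", // assumed alias target
            },
        }, metav1.CreateOptions{})
        if err != nil {
            return nil, err
        }

        // Flip to ClusterIP: clear externalName, add a selector and a port so
        // the replication controller pods created in the log become endpoints.
        svc.Spec.Type = corev1.ServiceTypeClusterIP
        svc.Spec.ExternalName = ""
        svc.Spec.Selector = map[string]string{"name": "externalname-service"}
        svc.Spec.Ports = []corev1.ServicePort{{
            Port:       80,
            TargetPort: intstr.FromInt(9376), // assumed container port
        }}
        return svcs.Update(ctx, svc, metav1.UpdateOptions{})
    }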
07/29/23 15:51:32.641 -Jul 29 15:51:32.654: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-7254" to be "running and ready" -Jul 29 15:51:32.662: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 7.354902ms -Jul 29 15:51:32.662: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:51:34.673: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019023343s -Jul 29 15:51:34.674: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:51:36.674: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 4.019170305s -Jul 29 15:51:36.674: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:51:38.672: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 6.017985967s -Jul 29 15:51:38.673: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:51:40.671: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 8.016832163s -Jul 29 15:51:40.671: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:51:42.671: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 10.016923442s -Jul 29 15:51:42.671: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:51:44.673: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 12.0184s -Jul 29 15:51:44.673: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:51:46.671: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 14.016629532s -Jul 29 15:51:46.671: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) -Jul 29 15:51:46.671: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" -[It] should execute prestop http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:212 -STEP: create the pod with lifecycle hook 07/29/23 15:51:46.678 -Jul 29 15:51:46.692: INFO: Waiting up to 5m0s for pod "pod-with-prestop-http-hook" in namespace "container-lifecycle-hook-7254" to be "running and ready" -Jul 29 15:51:46.697: INFO: Pod "pod-with-prestop-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 4.944863ms -Jul 29 15:51:46.697: INFO: The phase of Pod pod-with-prestop-http-hook is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:51:48.705: INFO: Pod "pod-with-prestop-http-hook": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.012866868s -Jul 29 15:51:48.705: INFO: The phase of Pod pod-with-prestop-http-hook is Running (Ready = true) -Jul 29 15:51:48.705: INFO: Pod "pod-with-prestop-http-hook" satisfied condition "running and ready" -STEP: delete the pod with lifecycle hook 07/29/23 15:51:48.712 -Jul 29 15:51:48.725: INFO: Waiting for pod pod-with-prestop-http-hook to disappear -Jul 29 15:51:48.733: INFO: Pod pod-with-prestop-http-hook still exists -Jul 29 15:51:50.734: INFO: Waiting for pod pod-with-prestop-http-hook to disappear -Jul 29 15:51:50.741: INFO: Pod pod-with-prestop-http-hook still exists -Jul 29 15:51:52.734: INFO: Waiting for pod pod-with-prestop-http-hook to disappear -Jul 29 15:51:52.744: INFO: Pod pod-with-prestop-http-hook no longer exists -STEP: check prestop hook 07/29/23 15:51:52.744 -[AfterEach] [sig-node] Container Lifecycle Hook +[BeforeEach] [sig-apps] DisruptionController + test/e2e/apps/disruption.go:72 +[It] should block an eviction until the PDB is updated to allow it [Conformance] + test/e2e/apps/disruption.go:347 +STEP: Creating a pdb that targets all three pods in a test replica set 08/24/23 11:50:55.865 +STEP: Waiting for the pdb to be processed 08/24/23 11:50:55.875 +STEP: First trying to evict a pod which shouldn't be evictable 08/24/23 11:50:57.905 +STEP: Waiting for all pods to be running 08/24/23 11:50:57.905 +Aug 24 11:50:57.913: INFO: pods: 0 < 3 +Aug 24 11:50:59.924: INFO: running pods: 2 < 3 +STEP: locating a running pod 08/24/23 11:51:01.926 +STEP: Updating the pdb to allow a pod to be evicted 08/24/23 11:51:01.974 +STEP: Waiting for the pdb to be processed 08/24/23 11:51:02 +STEP: Trying to evict the same pod we tried earlier which should now be evictable 08/24/23 11:51:04.012 +STEP: Waiting for all pods to be running 08/24/23 11:51:04.013 +STEP: Waiting for the pdb to observed all healthy pods 08/24/23 11:51:04.02 +STEP: Patching the pdb to disallow a pod to be evicted 08/24/23 11:51:04.067 +STEP: Waiting for the pdb to be processed 08/24/23 11:51:04.14 +STEP: Waiting for all pods to be running 08/24/23 11:51:06.164 +STEP: locating a running pod 08/24/23 11:51:06.17 +STEP: Deleting the pdb to allow a pod to be evicted 08/24/23 11:51:06.196 +STEP: Waiting for the pdb to be deleted 08/24/23 11:51:06.207 +STEP: Trying to evict the same pod we tried earlier which should now be evictable 08/24/23 11:51:06.213 +STEP: Waiting for all pods to be running 08/24/23 11:51:06.213 +[AfterEach] [sig-apps] DisruptionController test/e2e/framework/node/init/init.go:32 -Jul 29 15:51:52.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook +Aug 24 11:51:06.243: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook +[DeferCleanup (Each)] [sig-apps] DisruptionController dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook +[DeferCleanup (Each)] [sig-apps] DisruptionController tear down framework | framework.go:193 -STEP: Destroying namespace "container-lifecycle-hook-7254" for this suite. 07/29/23 15:51:52.797 +STEP: Destroying namespace "disruption-3505" for this suite. 
08/24/23 11:51:06.319 ------------------------------ -• [SLOW TEST] [20.214 seconds] -[sig-node] Container Lifecycle Hook -test/e2e/common/node/framework.go:23 - when create a pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:46 - should execute prestop http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:212 +• [SLOW TEST] [10.532 seconds] +[sig-apps] DisruptionController +test/e2e/apps/framework.go:23 + should block an eviction until the PDB is updated to allow it [Conformance] + test/e2e/apps/disruption.go:347 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Container Lifecycle Hook + [BeforeEach] [sig-apps] DisruptionController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:51:32.597 - Jul 29 15:51:32.598: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-lifecycle-hook 07/29/23 15:51:32.6 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:32.626 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:32.63 - [BeforeEach] [sig-node] Container Lifecycle Hook + STEP: Creating a kubernetes client 08/24/23 11:50:55.813 + Aug 24 11:50:55.813: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename disruption 08/24/23 11:50:55.818 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:50:55.853 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:50:55.86 + [BeforeEach] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] when create a pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:77 - STEP: create the container to handle the HTTPGet hook request. 07/29/23 15:51:32.641 - Jul 29 15:51:32.654: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-7254" to be "running and ready" - Jul 29 15:51:32.662: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 7.354902ms - Jul 29 15:51:32.662: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:51:34.673: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019023343s - Jul 29 15:51:34.674: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:51:36.674: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 4.019170305s - Jul 29 15:51:36.674: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:51:38.672: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 6.017985967s - Jul 29 15:51:38.673: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:51:40.671: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 8.016832163s - Jul 29 15:51:40.671: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:51:42.671: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. 
Elapsed: 10.016923442s - Jul 29 15:51:42.671: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:51:44.673: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 12.0184s - Jul 29 15:51:44.673: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:51:46.671: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 14.016629532s - Jul 29 15:51:46.671: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) - Jul 29 15:51:46.671: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" - [It] should execute prestop http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:212 - STEP: create the pod with lifecycle hook 07/29/23 15:51:46.678 - Jul 29 15:51:46.692: INFO: Waiting up to 5m0s for pod "pod-with-prestop-http-hook" in namespace "container-lifecycle-hook-7254" to be "running and ready" - Jul 29 15:51:46.697: INFO: Pod "pod-with-prestop-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 4.944863ms - Jul 29 15:51:46.697: INFO: The phase of Pod pod-with-prestop-http-hook is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:51:48.705: INFO: Pod "pod-with-prestop-http-hook": Phase="Running", Reason="", readiness=true. Elapsed: 2.012866868s - Jul 29 15:51:48.705: INFO: The phase of Pod pod-with-prestop-http-hook is Running (Ready = true) - Jul 29 15:51:48.705: INFO: Pod "pod-with-prestop-http-hook" satisfied condition "running and ready" - STEP: delete the pod with lifecycle hook 07/29/23 15:51:48.712 - Jul 29 15:51:48.725: INFO: Waiting for pod pod-with-prestop-http-hook to disappear - Jul 29 15:51:48.733: INFO: Pod pod-with-prestop-http-hook still exists - Jul 29 15:51:50.734: INFO: Waiting for pod pod-with-prestop-http-hook to disappear - Jul 29 15:51:50.741: INFO: Pod pod-with-prestop-http-hook still exists - Jul 29 15:51:52.734: INFO: Waiting for pod pod-with-prestop-http-hook to disappear - Jul 29 15:51:52.744: INFO: Pod pod-with-prestop-http-hook no longer exists - STEP: check prestop hook 07/29/23 15:51:52.744 - [AfterEach] [sig-node] Container Lifecycle Hook + [BeforeEach] [sig-apps] DisruptionController + test/e2e/apps/disruption.go:72 + [It] should block an eviction until the PDB is updated to allow it [Conformance] + test/e2e/apps/disruption.go:347 + STEP: Creating a pdb that targets all three pods in a test replica set 08/24/23 11:50:55.865 + STEP: Waiting for the pdb to be processed 08/24/23 11:50:55.875 + STEP: First trying to evict a pod which shouldn't be evictable 08/24/23 11:50:57.905 + STEP: Waiting for all pods to be running 08/24/23 11:50:57.905 + Aug 24 11:50:57.913: INFO: pods: 0 < 3 + Aug 24 11:50:59.924: INFO: running pods: 2 < 3 + STEP: locating a running pod 08/24/23 11:51:01.926 + STEP: Updating the pdb to allow a pod to be evicted 08/24/23 11:51:01.974 + STEP: Waiting for the pdb to be processed 08/24/23 11:51:02 + STEP: Trying to evict the same pod we tried earlier which should now be evictable 08/24/23 11:51:04.012 + STEP: Waiting for all pods to be running 08/24/23 11:51:04.013 + STEP: Waiting for the pdb to observed all healthy pods 08/24/23 11:51:04.02 + STEP: Patching the pdb to disallow a pod to be evicted 08/24/23 11:51:04.067 + STEP: Waiting for the pdb to be processed 08/24/23 11:51:04.14 + STEP: Waiting for all pods to be running 08/24/23 
11:51:06.164 + STEP: locating a running pod 08/24/23 11:51:06.17 + STEP: Deleting the pdb to allow a pod to be evicted 08/24/23 11:51:06.196 + STEP: Waiting for the pdb to be deleted 08/24/23 11:51:06.207 + STEP: Trying to evict the same pod we tried earlier which should now be evictable 08/24/23 11:51:06.213 + STEP: Waiting for all pods to be running 08/24/23 11:51:06.213 + [AfterEach] [sig-apps] DisruptionController test/e2e/framework/node/init/init.go:32 - Jul 29 15:51:52.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + Aug 24 11:51:06.243: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + [DeferCleanup (Each)] [sig-apps] DisruptionController dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + [DeferCleanup (Each)] [sig-apps] DisruptionController tear down framework | framework.go:193 - STEP: Destroying namespace "container-lifecycle-hook-7254" for this suite. 07/29/23 15:51:52.797 + STEP: Destroying namespace "disruption-3505" for this suite. 08/24/23 11:51:06.319 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSSSSSSSSS ------------------------------ -[sig-api-machinery] Watchers - should receive events on concurrent watches in same order [Conformance] - test/e2e/apimachinery/watch.go:334 -[BeforeEach] [sig-api-machinery] Watchers +[sig-node] PodTemplates + should delete a collection of pod templates [Conformance] + test/e2e/common/node/podtemplates.go:122 +[BeforeEach] [sig-node] PodTemplates set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:51:52.815 -Jul 29 15:51:52.815: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename watch 07/29/23 15:51:52.819 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:52.893 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:52.898 -[BeforeEach] [sig-api-machinery] Watchers +STEP: Creating a kubernetes client 08/24/23 11:51:06.349 +Aug 24 11:51:06.350: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename podtemplate 08/24/23 11:51:06.355 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:06.397 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:06.404 +[BeforeEach] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:31 -[It] should receive events on concurrent watches in same order [Conformance] - test/e2e/apimachinery/watch.go:334 -STEP: getting a starting resourceVersion 07/29/23 15:51:52.905 -STEP: starting a background goroutine to produce watch events 07/29/23 15:51:52.911 -STEP: creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order 07/29/23 15:51:52.912 -[AfterEach] [sig-api-machinery] Watchers +[It] should delete a collection of pod templates [Conformance] + test/e2e/common/node/podtemplates.go:122 +STEP: Create set of pod templates 08/24/23 11:51:06.408 +Aug 24 11:51:06.418: INFO: created test-podtemplate-1 +Aug 24 11:51:06.435: INFO: created test-podtemplate-2 +Aug 24 11:51:06.443: INFO: created test-podtemplate-3 +STEP: get a list of pod templates with a label 
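The [sig-apps] DisruptionController block above exercises the eviction API against a PodDisruptionBudget: while the budget requires all three replicas to stay healthy the eviction is rejected, and after the budget is relaxed (or deleted) the same eviction succeeds. A minimal client-go sketch of that handshake, assuming an initialized clientset and a running pod matched by the PDB's selector; the PDB name and labels are illustrative.

    package main

    import (
        "context"
        "fmt"

        policyv1 "k8s.io/api/policy/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
        "k8s.io/client-go/kubernetes"
    )

    func evictUnderPDB(ctx context.Context, cs kubernetes.Interface, ns, podName string) error {
        // A PDB that targets all three pods and allows zero disruptions.
        minAvailable := intstr.FromInt(3)
        pdbs := cs.PolicyV1().PodDisruptionBudgets(ns)
        _, err := pdbs.Create(ctx, &policyv1.PodDisruptionBudget{
            ObjectMeta: metav1.ObjectMeta{Name: "foo"},
            Spec: policyv1.PodDisruptionBudgetSpec{
                Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
                MinAvailable: &minAvailable,
            },
        }, metav1.CreateOptions{})
        if err != nil {
            return err
        }

        // First attempt: the API server answers 429 while the PDB would be
        // violated ("shouldn't be evictable" in the log).
        ev := &policyv1.Eviction{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: ns}}
        if err := cs.PolicyV1().Evictions(ns).Evict(ctx, ev); apierrors.IsTooManyRequests(err) {
            fmt.Println("eviction blocked by PDB, as expected")
        }

        // Relax the budget and retry; the suite also waits here for the
        // disruption controller to observe the new budget before evicting.
        relaxed := intstr.FromInt(2)
        pdb, err := pdbs.Get(ctx, "foo", metav1.GetOptions{})
        if err != nil {
            return err
        }
        pdb.Spec.MinAvailable = &relaxed
        if _, err := pdbs.Update(ctx, pdb, metav1.UpdateOptions{}); err != nil {
            return err
        }
        return cs.PolicyV1().Evictions(ns).Evict(ctx, ev)
    }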
in the current namespace 08/24/23 11:51:06.443 +STEP: delete collection of pod templates 08/24/23 11:51:06.45 +Aug 24 11:51:06.451: INFO: requesting DeleteCollection of pod templates +STEP: check that the list of pod templates matches the requested quantity 08/24/23 11:51:06.486 +Aug 24 11:51:06.486: INFO: requesting list of pod templates to confirm quantity +[AfterEach] [sig-node] PodTemplates test/e2e/framework/node/init/init.go:32 -Jul 29 15:51:55.630: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Watchers +Aug 24 11:51:06.493: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-node] PodTemplates dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-node] PodTemplates tear down framework | framework.go:193 -STEP: Destroying namespace "watch-2762" for this suite. 07/29/23 15:51:55.704 +STEP: Destroying namespace "podtemplate-9143" for this suite. 08/24/23 11:51:06.504 ------------------------------ -• [2.922 seconds] -[sig-api-machinery] Watchers -test/e2e/apimachinery/framework.go:23 - should receive events on concurrent watches in same order [Conformance] - test/e2e/apimachinery/watch.go:334 +• [0.166 seconds] +[sig-node] PodTemplates +test/e2e/common/node/framework.go:23 + should delete a collection of pod templates [Conformance] + test/e2e/common/node/podtemplates.go:122 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Watchers + [BeforeEach] [sig-node] PodTemplates set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:51:52.815 - Jul 29 15:51:52.815: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename watch 07/29/23 15:51:52.819 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:52.893 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:52.898 - [BeforeEach] [sig-api-machinery] Watchers + STEP: Creating a kubernetes client 08/24/23 11:51:06.349 + Aug 24 11:51:06.350: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename podtemplate 08/24/23 11:51:06.355 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:06.397 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:06.404 + [BeforeEach] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:31 - [It] should receive events on concurrent watches in same order [Conformance] - test/e2e/apimachinery/watch.go:334 - STEP: getting a starting resourceVersion 07/29/23 15:51:52.905 - STEP: starting a background goroutine to produce watch events 07/29/23 15:51:52.911 - STEP: creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order 07/29/23 15:51:52.912 - [AfterEach] [sig-api-machinery] Watchers + [It] should delete a collection of pod templates [Conformance] + test/e2e/common/node/podtemplates.go:122 + STEP: Create set of pod templates 08/24/23 11:51:06.408 + Aug 24 11:51:06.418: INFO: created test-podtemplate-1 + Aug 24 11:51:06.435: INFO: created test-podtemplate-2 + Aug 24 11:51:06.443: INFO: created test-podtemplate-3 + STEP: get a list of pod templates with a label in the 
current namespace 08/24/23 11:51:06.443 + STEP: delete collection of pod templates 08/24/23 11:51:06.45 + Aug 24 11:51:06.451: INFO: requesting DeleteCollection of pod templates + STEP: check that the list of pod templates matches the requested quantity 08/24/23 11:51:06.486 + Aug 24 11:51:06.486: INFO: requesting list of pod templates to confirm quantity + [AfterEach] [sig-node] PodTemplates test/e2e/framework/node/init/init.go:32 - Jul 29 15:51:55.630: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Watchers + Aug 24 11:51:06.493: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-node] PodTemplates dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-node] PodTemplates tear down framework | framework.go:193 - STEP: Destroying namespace "watch-2762" for this suite. 07/29/23 15:51:55.704 + STEP: Destroying namespace "podtemplate-9143" for this suite. 08/24/23 11:51:06.504 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should be able to change the type from ExternalName to NodePort [Conformance] - test/e2e/network/service.go:1477 -[BeforeEach] [sig-network] Services +[sig-storage] EmptyDir volumes + pod should support shared volumes between containers [Conformance] + test/e2e/common/storage/empty_dir.go:227 +[BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:51:55.74 -Jul 29 15:51:55.740: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 15:51:55.742 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:55.776 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:55.78 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 11:51:06.524 +Aug 24 11:51:06.525: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 11:51:06.526 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:06.563 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:06.569 +[BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should be able to change the type from ExternalName to NodePort [Conformance] - test/e2e/network/service.go:1477 -STEP: creating a service externalname-service with the type=ExternalName in namespace services-7956 07/29/23 15:51:55.784 -STEP: changing the ExternalName service to type=NodePort 07/29/23 15:51:55.795 -STEP: creating replication controller externalname-service in namespace services-7956 07/29/23 15:51:55.84 -I0729 15:51:55.862970 13 runners.go:193] Created replication controller with name: externalname-service, namespace: services-7956, replica count: 2 -I0729 15:51:58.918152 13 runners.go:193] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Jul 29 15:51:58.918: INFO: Creating new exec pod -Jul 29 15:51:58.930: 
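The [sig-node] PodTemplates block above creates three labelled templates, removes them with a single DeleteCollection call, and confirms the matching list is empty afterwards. A sketch of the same flow with client-go, assuming an initialized clientset; the label key/value and the container image are assumptions, the template names mirror the log.

    package main

    import (
        "context"
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func deletePodTemplateCollection(ctx context.Context, cs kubernetes.Interface, ns string) error {
        tmpls := cs.CoreV1().PodTemplates(ns)
        label := map[string]string{"podtemplate-set": "true"} // assumed label

        // Create the three labelled templates seen in the log.
        for _, name := range []string{"test-podtemplate-1", "test-podtemplate-2", "test-podtemplate-3"} {
            pt := &corev1.PodTemplate{
                ObjectMeta: metav1.ObjectMeta{Name: name, Labels: label},
                Template: corev1.PodTemplateSpec{
                    Spec: corev1.PodSpec{Containers: []corev1.Container{{
                        Name: "pause", Image: "registry.k8s.io/pause:3.9", // assumed image
                    }}},
                },
            }
            if _, err := tmpls.Create(ctx, pt, metav1.CreateOptions{}); err != nil {
                return err
            }
        }

        // One DeleteCollection call removes everything matching the selector.
        sel := metav1.ListOptions{LabelSelector: "podtemplate-set=true"}
        if err := tmpls.DeleteCollection(ctx, metav1.DeleteOptions{}, sel); err != nil {
            return err
        }

        // Confirm the requested quantity (zero) remains.
        list, err := tmpls.List(ctx, sel)
        if err != nil {
            return err
        }
        fmt.Printf("templates left: %d\n", len(list.Items))
        return nil
    }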
INFO: Waiting up to 5m0s for pod "execpod54dbs" in namespace "services-7956" to be "running" -Jul 29 15:51:58.944: INFO: Pod "execpod54dbs": Phase="Pending", Reason="", readiness=false. Elapsed: 13.943478ms -Jul 29 15:52:00.952: INFO: Pod "execpod54dbs": Phase="Running", Reason="", readiness=true. Elapsed: 2.021351247s -Jul 29 15:52:00.952: INFO: Pod "execpod54dbs" satisfied condition "running" -Jul 29 15:52:01.964: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7956 exec execpod54dbs -- /bin/sh -x -c nc -v -z -w 2 externalname-service 80' -Jul 29 15:52:02.296: INFO: stderr: "+ nc -v -z -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" -Jul 29 15:52:02.296: INFO: stdout: "" -Jul 29 15:52:02.297: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7956 exec execpod54dbs -- /bin/sh -x -c nc -v -z -w 2 10.233.37.223 80' -Jul 29 15:52:02.575: INFO: stderr: "+ nc -v -z -w 2 10.233.37.223 80\nConnection to 10.233.37.223 80 port [tcp/http] succeeded!\n" -Jul 29 15:52:02.575: INFO: stdout: "" -Jul 29 15:52:02.576: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7956 exec execpod54dbs -- /bin/sh -x -c nc -v -z -w 2 192.168.121.120 31712' -Jul 29 15:52:02.815: INFO: stderr: "+ nc -v -z -w 2 192.168.121.120 31712\nConnection to 192.168.121.120 31712 port [tcp/*] succeeded!\n" -Jul 29 15:52:02.815: INFO: stdout: "" -Jul 29 15:52:02.818: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7956 exec execpod54dbs -- /bin/sh -x -c nc -v -z -w 2 192.168.121.141 31712' -Jul 29 15:52:03.078: INFO: stderr: "+ nc -v -z -w 2 192.168.121.141 31712\nConnection to 192.168.121.141 31712 port [tcp/*] succeeded!\n" -Jul 29 15:52:03.078: INFO: stdout: "" -Jul 29 15:52:03.078: INFO: Cleaning up the ExternalName to NodePort test service -[AfterEach] [sig-network] Services +[It] pod should support shared volumes between containers [Conformance] + test/e2e/common/storage/empty_dir.go:227 +STEP: Creating Pod 08/24/23 11:51:06.573 +Aug 24 11:51:06.589: INFO: Waiting up to 5m0s for pod "pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0" in namespace "emptydir-4369" to be "running" +Aug 24 11:51:06.597: INFO: Pod "pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0": Phase="Pending", Reason="", readiness=false. Elapsed: 7.820148ms +Aug 24 11:51:08.604: INFO: Pod "pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0": Phase="Running", Reason="", readiness=false. 
Elapsed: 2.01464932s +Aug 24 11:51:08.604: INFO: Pod "pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0" satisfied condition "running" +STEP: Reading file content from the nginx-container 08/24/23 11:51:08.604 +Aug 24 11:51:08.604: INFO: ExecWithOptions {Command:[/bin/sh -c cat /usr/share/volumeshare/shareddata.txt] Namespace:emptydir-4369 PodName:pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0 ContainerName:busybox-main-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:51:08.604: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:51:08.606: INFO: ExecWithOptions: Clientset creation +Aug 24 11:51:08.606: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/emptydir-4369/pods/pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0/exec?command=%2Fbin%2Fsh&command=-c&command=cat+%2Fusr%2Fshare%2Fvolumeshare%2Fshareddata.txt&container=busybox-main-container&container=busybox-main-container&stderr=true&stdout=true) +Aug 24 11:51:08.714: INFO: Exec stderr: "" +[AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 15:52:03.130: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 11:51:08.715: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "services-7956" for this suite. 07/29/23 15:52:03.138 +STEP: Destroying namespace "emptydir-4369" for this suite. 
08/24/23 11:51:08.726 ------------------------------ -• [SLOW TEST] [7.412 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should be able to change the type from ExternalName to NodePort [Conformance] - test/e2e/network/service.go:1477 +• [2.215 seconds] +[sig-storage] EmptyDir volumes +test/e2e/common/storage/framework.go:23 + pod should support shared volumes between containers [Conformance] + test/e2e/common/storage/empty_dir.go:227 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:51:55.74 - Jul 29 15:51:55.740: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 15:51:55.742 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:51:55.776 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:51:55.78 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 11:51:06.524 + Aug 24 11:51:06.525: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 11:51:06.526 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:06.563 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:06.569 + [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should be able to change the type from ExternalName to NodePort [Conformance] - test/e2e/network/service.go:1477 - STEP: creating a service externalname-service with the type=ExternalName in namespace services-7956 07/29/23 15:51:55.784 - STEP: changing the ExternalName service to type=NodePort 07/29/23 15:51:55.795 - STEP: creating replication controller externalname-service in namespace services-7956 07/29/23 15:51:55.84 - I0729 15:51:55.862970 13 runners.go:193] Created replication controller with name: externalname-service, namespace: services-7956, replica count: 2 - I0729 15:51:58.918152 13 runners.go:193] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady - Jul 29 15:51:58.918: INFO: Creating new exec pod - Jul 29 15:51:58.930: INFO: Waiting up to 5m0s for pod "execpod54dbs" in namespace "services-7956" to be "running" - Jul 29 15:51:58.944: INFO: Pod "execpod54dbs": Phase="Pending", Reason="", readiness=false. Elapsed: 13.943478ms - Jul 29 15:52:00.952: INFO: Pod "execpod54dbs": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.021351247s - Jul 29 15:52:00.952: INFO: Pod "execpod54dbs" satisfied condition "running" - Jul 29 15:52:01.964: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7956 exec execpod54dbs -- /bin/sh -x -c nc -v -z -w 2 externalname-service 80' - Jul 29 15:52:02.296: INFO: stderr: "+ nc -v -z -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" - Jul 29 15:52:02.296: INFO: stdout: "" - Jul 29 15:52:02.297: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7956 exec execpod54dbs -- /bin/sh -x -c nc -v -z -w 2 10.233.37.223 80' - Jul 29 15:52:02.575: INFO: stderr: "+ nc -v -z -w 2 10.233.37.223 80\nConnection to 10.233.37.223 80 port [tcp/http] succeeded!\n" - Jul 29 15:52:02.575: INFO: stdout: "" - Jul 29 15:52:02.576: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7956 exec execpod54dbs -- /bin/sh -x -c nc -v -z -w 2 192.168.121.120 31712' - Jul 29 15:52:02.815: INFO: stderr: "+ nc -v -z -w 2 192.168.121.120 31712\nConnection to 192.168.121.120 31712 port [tcp/*] succeeded!\n" - Jul 29 15:52:02.815: INFO: stdout: "" - Jul 29 15:52:02.818: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7956 exec execpod54dbs -- /bin/sh -x -c nc -v -z -w 2 192.168.121.141 31712' - Jul 29 15:52:03.078: INFO: stderr: "+ nc -v -z -w 2 192.168.121.141 31712\nConnection to 192.168.121.141 31712 port [tcp/*] succeeded!\n" - Jul 29 15:52:03.078: INFO: stdout: "" - Jul 29 15:52:03.078: INFO: Cleaning up the ExternalName to NodePort test service - [AfterEach] [sig-network] Services + [It] pod should support shared volumes between containers [Conformance] + test/e2e/common/storage/empty_dir.go:227 + STEP: Creating Pod 08/24/23 11:51:06.573 + Aug 24 11:51:06.589: INFO: Waiting up to 5m0s for pod "pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0" in namespace "emptydir-4369" to be "running" + Aug 24 11:51:06.597: INFO: Pod "pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0": Phase="Pending", Reason="", readiness=false. Elapsed: 7.820148ms + Aug 24 11:51:08.604: INFO: Pod "pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0": Phase="Running", Reason="", readiness=false. 
Elapsed: 2.01464932s + Aug 24 11:51:08.604: INFO: Pod "pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0" satisfied condition "running" + STEP: Reading file content from the nginx-container 08/24/23 11:51:08.604 + Aug 24 11:51:08.604: INFO: ExecWithOptions {Command:[/bin/sh -c cat /usr/share/volumeshare/shareddata.txt] Namespace:emptydir-4369 PodName:pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0 ContainerName:busybox-main-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:51:08.604: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:51:08.606: INFO: ExecWithOptions: Clientset creation + Aug 24 11:51:08.606: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/emptydir-4369/pods/pod-sharedvolume-45d1634b-9b4f-4302-ab49-15c1bbbb48a0/exec?command=%2Fbin%2Fsh&command=-c&command=cat+%2Fusr%2Fshare%2Fvolumeshare%2Fshareddata.txt&container=busybox-main-container&container=busybox-main-container&stderr=true&stdout=true) + Aug 24 11:51:08.714: INFO: Exec stderr: "" + [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 15:52:03.130: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 11:51:08.715: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "services-7956" for this suite. 07/29/23 15:52:03.138 + STEP: Destroying namespace "emptydir-4369" for this suite. 
08/24/23 11:51:08.726 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Secrets - optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:205 -[BeforeEach] [sig-storage] Secrets +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod with mountPath of existing file [Conformance] + test/e2e/storage/subpath.go:80 +[BeforeEach] [sig-storage] Subpath set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:52:03.163 -Jul 29 15:52:03.164: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename secrets 07/29/23 15:52:03.166 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:03.204 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:03.214 -[BeforeEach] [sig-storage] Secrets +STEP: Creating a kubernetes client 08/24/23 11:51:08.745 +Aug 24 11:51:08.745: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename subpath 08/24/23 11:51:08.747 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:08.783 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:08.79 +[BeforeEach] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:31 -[It] optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:205 -STEP: Creating secret with name s-test-opt-del-61e9430e-11cb-479d-844d-1086dde8c0d6 07/29/23 15:52:03.229 -STEP: Creating secret with name s-test-opt-upd-c8d8acf0-ec51-4aa6-89f8-5c72e22a9334 07/29/23 15:52:03.236 -STEP: Creating the pod 07/29/23 15:52:03.243 -Jul 29 15:52:03.259: INFO: Waiting up to 5m0s for pod "pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853" in namespace "secrets-5034" to be "running and ready" -Jul 29 15:52:03.270: INFO: Pod "pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853": Phase="Pending", Reason="", readiness=false. Elapsed: 10.806738ms -Jul 29 15:52:03.270: INFO: The phase of Pod pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:52:05.278: INFO: Pod "pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.019097179s -Jul 29 15:52:05.278: INFO: The phase of Pod pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853 is Running (Ready = true) -Jul 29 15:52:05.278: INFO: Pod "pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853" satisfied condition "running and ready" -STEP: Deleting secret s-test-opt-del-61e9430e-11cb-479d-844d-1086dde8c0d6 07/29/23 15:52:05.335 -STEP: Updating secret s-test-opt-upd-c8d8acf0-ec51-4aa6-89f8-5c72e22a9334 07/29/23 15:52:05.352 -STEP: Creating secret with name s-test-opt-create-e9909912-ef54-43c6-87e4-fc4593a337e5 07/29/23 15:52:05.371 -STEP: waiting to observe update in volume 07/29/23 15:52:05.382 -[AfterEach] [sig-storage] Secrets - test/e2e/framework/node/init/init.go:32 -Jul 29 15:52:07.464: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Secrets +[BeforeEach] Atomic writer volumes + test/e2e/storage/subpath.go:40 +STEP: Setting up data 08/24/23 11:51:08.794 +[It] should support subpaths with configmap pod with mountPath of existing file [Conformance] + test/e2e/storage/subpath.go:80 +STEP: Creating pod pod-subpath-test-configmap-xwxv 08/24/23 11:51:08.814 +STEP: Creating a pod to test atomic-volume-subpath 08/24/23 11:51:08.814 +Aug 24 11:51:08.831: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-xwxv" in namespace "subpath-4536" to be "Succeeded or Failed" +Aug 24 11:51:08.840: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Pending", Reason="", readiness=false. Elapsed: 8.236803ms +Aug 24 11:51:10.845: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 2.013858099s +Aug 24 11:51:12.846: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 4.015018463s +Aug 24 11:51:14.848: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 6.016409679s +Aug 24 11:51:16.845: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 8.01321208s +Aug 24 11:51:18.849: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 10.017513528s +Aug 24 11:51:20.847: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 12.015663426s +Aug 24 11:51:22.847: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 14.01510698s +Aug 24 11:51:24.847: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 16.015420477s +Aug 24 11:51:26.848: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 18.016170052s +Aug 24 11:51:28.848: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 20.016617987s +Aug 24 11:51:30.850: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=false. Elapsed: 22.018437238s +Aug 24 11:51:32.851: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 24.019973761s +STEP: Saw pod success 08/24/23 11:51:32.852 +Aug 24 11:51:32.852: INFO: Pod "pod-subpath-test-configmap-xwxv" satisfied condition "Succeeded or Failed" +Aug 24 11:51:32.860: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-configmap-xwxv container test-container-subpath-configmap-xwxv: +STEP: delete the pod 08/24/23 11:51:32.879 +Aug 24 11:51:32.905: INFO: Waiting for pod pod-subpath-test-configmap-xwxv to disappear +Aug 24 11:51:32.911: INFO: Pod pod-subpath-test-configmap-xwxv no longer exists +STEP: Deleting pod pod-subpath-test-configmap-xwxv 08/24/23 11:51:32.912 +Aug 24 11:51:32.912: INFO: Deleting pod "pod-subpath-test-configmap-xwxv" in namespace "subpath-4536" +[AfterEach] [sig-storage] Subpath + test/e2e/framework/node/init/init.go:32 +Aug 24 11:51:32.919: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-storage] Subpath dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-storage] Subpath tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-5034" for this suite. 07/29/23 15:52:07.476 +STEP: Destroying namespace "subpath-4536" for this suite. 08/24/23 11:51:32.929 ------------------------------ -• [4.327 seconds] -[sig-storage] Secrets -test/e2e/common/storage/framework.go:23 - optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:205 +• [SLOW TEST] [24.201 seconds] +[sig-storage] Subpath +test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + test/e2e/storage/subpath.go:36 + should support subpaths with configmap pod with mountPath of existing file [Conformance] + test/e2e/storage/subpath.go:80 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Secrets + [BeforeEach] [sig-storage] Subpath set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:52:03.163 - Jul 29 15:52:03.164: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 15:52:03.166 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:03.204 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:03.214 - [BeforeEach] [sig-storage] Secrets + STEP: Creating a kubernetes client 08/24/23 11:51:08.745 + Aug 24 11:51:08.745: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename subpath 08/24/23 11:51:08.747 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:08.783 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:08.79 + [BeforeEach] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:31 - [It] optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:205 - STEP: Creating secret with name s-test-opt-del-61e9430e-11cb-479d-844d-1086dde8c0d6 07/29/23 15:52:03.229 - STEP: Creating secret with name s-test-opt-upd-c8d8acf0-ec51-4aa6-89f8-5c72e22a9334 07/29/23 15:52:03.236 - STEP: Creating the pod 07/29/23 15:52:03.243 - Jul 29 15:52:03.259: INFO: Waiting up to 5m0s for pod "pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853" in namespace "secrets-5034" to be "running and ready" - Jul 29 15:52:03.270: 
INFO: Pod "pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853": Phase="Pending", Reason="", readiness=false. Elapsed: 10.806738ms - Jul 29 15:52:03.270: INFO: The phase of Pod pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:52:05.278: INFO: Pod "pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853": Phase="Running", Reason="", readiness=true. Elapsed: 2.019097179s - Jul 29 15:52:05.278: INFO: The phase of Pod pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853 is Running (Ready = true) - Jul 29 15:52:05.278: INFO: Pod "pod-secrets-8485349d-0aed-44d2-99b5-3c3e8bbd7853" satisfied condition "running and ready" - STEP: Deleting secret s-test-opt-del-61e9430e-11cb-479d-844d-1086dde8c0d6 07/29/23 15:52:05.335 - STEP: Updating secret s-test-opt-upd-c8d8acf0-ec51-4aa6-89f8-5c72e22a9334 07/29/23 15:52:05.352 - STEP: Creating secret with name s-test-opt-create-e9909912-ef54-43c6-87e4-fc4593a337e5 07/29/23 15:52:05.371 - STEP: waiting to observe update in volume 07/29/23 15:52:05.382 - [AfterEach] [sig-storage] Secrets + [BeforeEach] Atomic writer volumes + test/e2e/storage/subpath.go:40 + STEP: Setting up data 08/24/23 11:51:08.794 + [It] should support subpaths with configmap pod with mountPath of existing file [Conformance] + test/e2e/storage/subpath.go:80 + STEP: Creating pod pod-subpath-test-configmap-xwxv 08/24/23 11:51:08.814 + STEP: Creating a pod to test atomic-volume-subpath 08/24/23 11:51:08.814 + Aug 24 11:51:08.831: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-xwxv" in namespace "subpath-4536" to be "Succeeded or Failed" + Aug 24 11:51:08.840: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Pending", Reason="", readiness=false. Elapsed: 8.236803ms + Aug 24 11:51:10.845: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 2.013858099s + Aug 24 11:51:12.846: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 4.015018463s + Aug 24 11:51:14.848: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 6.016409679s + Aug 24 11:51:16.845: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 8.01321208s + Aug 24 11:51:18.849: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 10.017513528s + Aug 24 11:51:20.847: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 12.015663426s + Aug 24 11:51:22.847: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 14.01510698s + Aug 24 11:51:24.847: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 16.015420477s + Aug 24 11:51:26.848: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 18.016170052s + Aug 24 11:51:28.848: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=true. Elapsed: 20.016617987s + Aug 24 11:51:30.850: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Running", Reason="", readiness=false. Elapsed: 22.018437238s + Aug 24 11:51:32.851: INFO: Pod "pod-subpath-test-configmap-xwxv": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 24.019973761s + STEP: Saw pod success 08/24/23 11:51:32.852 + Aug 24 11:51:32.852: INFO: Pod "pod-subpath-test-configmap-xwxv" satisfied condition "Succeeded or Failed" + Aug 24 11:51:32.860: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-configmap-xwxv container test-container-subpath-configmap-xwxv: + STEP: delete the pod 08/24/23 11:51:32.879 + Aug 24 11:51:32.905: INFO: Waiting for pod pod-subpath-test-configmap-xwxv to disappear + Aug 24 11:51:32.911: INFO: Pod pod-subpath-test-configmap-xwxv no longer exists + STEP: Deleting pod pod-subpath-test-configmap-xwxv 08/24/23 11:51:32.912 + Aug 24 11:51:32.912: INFO: Deleting pod "pod-subpath-test-configmap-xwxv" in namespace "subpath-4536" + [AfterEach] [sig-storage] Subpath test/e2e/framework/node/init/init.go:32 - Jul 29 15:52:07.464: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Secrets + Aug 24 11:51:32.919: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-storage] Subpath dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-storage] Subpath tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-5034" for this suite. 07/29/23 15:52:07.476 + STEP: Destroying namespace "subpath-4536" for this suite. 08/24/23 11:51:32.929 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should provide container's cpu limit [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:193 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-network] Proxy version v1 + should proxy through a service and a pod [Conformance] + test/e2e/network/proxy.go:101 +[BeforeEach] version v1 set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:52:07.495 -Jul 29 15:52:07.495: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 15:52:07.5 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:07.539 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:07.55 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 11:51:32.957 +Aug 24 11:51:32.957: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename proxy 08/24/23 11:51:32.963 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:32.999 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:33.007 +[BeforeEach] version v1 test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should provide container's cpu limit [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:193 -STEP: Creating a pod to test downward API volume plugin 07/29/23 15:52:07.557 -Jul 29 15:52:07.577: INFO: Waiting up to 5m0s for pod "downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391" in namespace "projected-1695" to be "Succeeded or Failed" -Jul 29 15:52:07.590: INFO: Pod 
"downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391": Phase="Pending", Reason="", readiness=false. Elapsed: 12.386313ms -Jul 29 15:52:09.603: INFO: Pod "downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391": Phase="Pending", Reason="", readiness=false. Elapsed: 2.025889544s -Jul 29 15:52:11.604: INFO: Pod "downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.026639118s -STEP: Saw pod success 07/29/23 15:52:11.605 -Jul 29 15:52:11.606: INFO: Pod "downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391" satisfied condition "Succeeded or Failed" -Jul 29 15:52:11.612: INFO: Trying to get logs from node wetuj3nuajog-2 pod downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391 container client-container: -STEP: delete the pod 07/29/23 15:52:11.634 -Jul 29 15:52:11.694: INFO: Waiting for pod downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391 to disappear -Jul 29 15:52:11.700: INFO: Pod downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +[It] should proxy through a service and a pod [Conformance] + test/e2e/network/proxy.go:101 +STEP: starting an echo server on multiple ports 08/24/23 11:51:33.045 +STEP: creating replication controller proxy-service-xtkzj in namespace proxy-8717 08/24/23 11:51:33.051 +I0824 11:51:33.069739 14 runners.go:193] Created replication controller with name: proxy-service-xtkzj, namespace: proxy-8717, replica count: 1 +I0824 11:51:34.122105 14 runners.go:193] proxy-service-xtkzj Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0824 11:51:35.122784 14 runners.go:193] proxy-service-xtkzj Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Aug 24 11:51:35.130: INFO: Endpoint proxy-8717/proxy-service-xtkzj is not ready yet +Aug 24 11:51:37.138: INFO: setup took 4.126570161s, starting test cases +STEP: running 16 cases, 20 attempts per case, 320 total attempts 08/24/23 11:51:37.138 +Aug 24 11:51:37.169: INFO: (0) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 28.522903ms) +Aug 24 11:51:37.169: INFO: (0) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 29.398076ms) +Aug 24 11:51:37.172: INFO: (0) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... 
(200; 30.634616ms) +Aug 24 11:51:37.173: INFO: (0) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 33.824865ms) +Aug 24 11:51:37.173: INFO: (0) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 32.173479ms) +Aug 24 11:51:37.173: INFO: (0) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 32.50786ms) +Aug 24 11:51:37.173: INFO: (0) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 32.042531ms) +Aug 24 11:51:37.178: INFO: (0) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 36.218758ms) +Aug 24 11:51:37.178: INFO: (0) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 37.308887ms) +Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 39.715858ms) +Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 40.088371ms) +Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 39.903441ms) +Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 40.486589ms) +Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 42.497437ms) +Aug 24 11:51:37.183: INFO: (0) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 41.635021ms) +Aug 24 11:51:37.184: INFO: (0) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... (200; 25.086291ms) +Aug 24 11:51:37.211: INFO: (1) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 26.613043ms) +Aug 24 11:51:37.212: INFO: (1) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 26.753585ms) +Aug 24 11:51:37.217: INFO: (1) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 31.906206ms) +Aug 24 11:51:37.217: INFO: (1) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 32.873998ms) +Aug 24 11:51:37.219: INFO: (1) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 33.484772ms) +Aug 24 11:51:37.219: INFO: (1) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 34.803439ms) +Aug 24 11:51:37.220: INFO: (1) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 34.594548ms) +Aug 24 11:51:37.221: INFO: (1) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... (200; 36.927147ms) +Aug 24 11:51:37.223: INFO: (1) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 36.886405ms) +Aug 24 11:51:37.224: INFO: (1) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 38.556353ms) +Aug 24 11:51:37.245: INFO: (2) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 20.433121ms) +Aug 24 11:51:37.245: INFO: (2) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... 
(200; 20.303791ms) +Aug 24 11:51:37.247: INFO: (2) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 21.027376ms) +Aug 24 11:51:37.248: INFO: (2) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 23.018981ms) +Aug 24 11:51:37.250: INFO: (2) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 24.96934ms) +Aug 24 11:51:37.250: INFO: (2) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 25.118302ms) +Aug 24 11:51:37.251: INFO: (2) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 24.502841ms) +Aug 24 11:51:37.252: INFO: (2) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 27.460364ms) +Aug 24 11:51:37.252: INFO: (2) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 27.809561ms) +Aug 24 11:51:37.252: INFO: (2) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 33.582068ms) +Aug 24 11:51:37.259: INFO: (2) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 34.048449ms) +Aug 24 11:51:37.259: INFO: (2) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 33.692441ms) +Aug 24 11:51:37.262: INFO: (2) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 36.861813ms) +Aug 24 11:51:37.274: INFO: (3) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 11.337526ms) +Aug 24 11:51:37.275: INFO: (3) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 11.520741ms) +Aug 24 11:51:37.278: INFO: (3) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 15.460722ms) +Aug 24 11:51:37.282: INFO: (3) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 19.336439ms) +Aug 24 11:51:37.282: INFO: (3) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... 
(200; 18.871227ms) +Aug 24 11:51:37.284: INFO: (3) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 20.459922ms) +Aug 24 11:51:37.286: INFO: (3) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 22.808705ms) +Aug 24 11:51:37.287: INFO: (3) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 23.186352ms) +Aug 24 11:51:37.291: INFO: (3) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 27.054353ms) +Aug 24 11:51:37.291: INFO: (3) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 26.940457ms) +Aug 24 11:51:37.291: INFO: (3) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 31.859408ms) +Aug 24 11:51:37.297: INFO: (3) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 33.376647ms) +Aug 24 11:51:37.298: INFO: (3) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 33.805351ms) +Aug 24 11:51:37.299: INFO: (3) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 34.501386ms) +Aug 24 11:51:37.323: INFO: (4) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 23.419651ms) +Aug 24 11:51:37.331: INFO: (4) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... (200; 33.699125ms) +Aug 24 11:51:37.335: INFO: (4) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 32.987935ms) +Aug 24 11:51:37.343: INFO: (4) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 43.537878ms) +Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 45.108472ms) +Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 46.236119ms) +Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 46.690546ms) +Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 46.267377ms) +Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 47.467086ms) +Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 47.267427ms) +Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 48.11728ms) +Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 48.114615ms) +Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 48.688187ms) +Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 47.160844ms) +Aug 24 11:51:37.364: INFO: (5) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 15.522742ms) +Aug 24 11:51:37.370: INFO: (5) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 20.439746ms) +Aug 24 11:51:37.371: INFO: (5) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... 
(200; 21.79586ms) +Aug 24 11:51:37.373: INFO: (5) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 23.3018ms) +Aug 24 11:51:37.373: INFO: (5) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 22.620725ms) +Aug 24 11:51:37.374: INFO: (5) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 23.823797ms) +Aug 24 11:51:37.380: INFO: (5) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 30.690922ms) +Aug 24 11:51:37.380: INFO: (5) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 29.846445ms) +Aug 24 11:51:37.381: INFO: (5) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 32.052231ms) +Aug 24 11:51:37.381: INFO: (5) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 31.186761ms) +Aug 24 11:51:37.382: INFO: (5) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... (200; 24.489349ms) +Aug 24 11:51:37.410: INFO: (6) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 26.88738ms) +Aug 24 11:51:37.414: INFO: (6) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 26.593402ms) +Aug 24 11:51:37.414: INFO: (6) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 26.997856ms) +Aug 24 11:51:37.414: INFO: (6) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 26.85147ms) +Aug 24 11:51:37.415: INFO: (6) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 27.356661ms) +Aug 24 11:51:37.415: INFO: (6) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 27.724933ms) +Aug 24 11:51:37.416: INFO: (6) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 29.225761ms) +Aug 24 11:51:37.417: INFO: (6) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 29.965273ms) +Aug 24 11:51:37.430: INFO: (7) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 12.445567ms) +Aug 24 11:51:37.430: INFO: (7) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 12.5745ms) +Aug 24 11:51:37.436: INFO: (7) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 18.115475ms) +Aug 24 11:51:37.437: INFO: (7) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 19.1481ms) +Aug 24 11:51:37.437: INFO: (7) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 18.849719ms) +Aug 24 11:51:37.437: INFO: (7) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 18.688084ms) +Aug 24 11:51:37.437: INFO: (7) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 14.491022ms) +Aug 24 11:51:37.457: INFO: (8) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... 
(200; 14.29584ms) +Aug 24 11:51:37.458: INFO: (8) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 14.723869ms) +Aug 24 11:51:37.465: INFO: (8) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 21.475991ms) +Aug 24 11:51:37.467: INFO: (8) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 23.534775ms) +Aug 24 11:51:37.467: INFO: (8) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 23.809043ms) +Aug 24 11:51:37.468: INFO: (8) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... (200; 28.085565ms) +Aug 24 11:51:37.472: INFO: (8) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 29.256385ms) +Aug 24 11:51:37.472: INFO: (8) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 28.519903ms) +Aug 24 11:51:37.474: INFO: (8) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 31.270202ms) +Aug 24 11:51:37.475: INFO: (8) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 31.219872ms) +Aug 24 11:51:37.475: INFO: (8) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 31.075299ms) +Aug 24 11:51:37.488: INFO: (9) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 13.32175ms) +Aug 24 11:51:37.489: INFO: (9) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 12.983146ms) +Aug 24 11:51:37.491: INFO: (9) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 15.909536ms) +Aug 24 11:51:37.496: INFO: (9) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 20.757405ms) +Aug 24 11:51:37.499: INFO: (9) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 22.628104ms) +Aug 24 11:51:37.499: INFO: (9) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 23.974649ms) +Aug 24 11:51:37.499: INFO: (9) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 23.795444ms) +Aug 24 11:51:37.500: INFO: (9) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 23.92096ms) +Aug 24 11:51:37.500: INFO: (9) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... 
(200; 24.17641ms) +Aug 24 11:51:37.501: INFO: (9) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 26.219438ms) +Aug 24 11:51:37.502: INFO: (9) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 25.749255ms) +Aug 24 11:51:37.502: INFO: (9) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 27.06246ms) +Aug 24 11:51:37.503: INFO: (9) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 27.673484ms) +Aug 24 11:51:37.503: INFO: (9) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 27.150857ms) +Aug 24 11:51:37.504: INFO: (9) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 27.591189ms) +Aug 24 11:51:37.520: INFO: (10) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 16.125613ms) +Aug 24 11:51:37.522: INFO: (10) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 17.650153ms) +Aug 24 11:51:37.522: INFO: (10) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 17.864148ms) +Aug 24 11:51:37.524: INFO: (10) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 19.388775ms) +Aug 24 11:51:37.525: INFO: (10) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 20.259796ms) +Aug 24 11:51:37.527: INFO: (10) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 24.204708ms) +Aug 24 11:51:37.530: INFO: (10) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 24.628524ms) +Aug 24 11:51:37.530: INFO: (10) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 24.963742ms) +Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 26.423323ms) +Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 26.467246ms) +Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 26.471007ms) +Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 26.814154ms) +Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 26.170129ms) +Aug 24 11:51:37.532: INFO: (10) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 27.318892ms) +Aug 24 11:51:37.552: INFO: (11) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 19.412225ms) +Aug 24 11:51:37.553: INFO: (11) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 20.378284ms) +Aug 24 11:51:37.558: INFO: (11) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... 
(200; 24.662806ms) +Aug 24 11:51:37.561: INFO: (11) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 27.645139ms) +Aug 24 11:51:37.564: INFO: (11) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 30.470394ms) +Aug 24 11:51:37.565: INFO: (11) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 30.58965ms) +Aug 24 11:51:37.566: INFO: (11) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 33.409102ms) +Aug 24 11:51:37.566: INFO: (11) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 32.544232ms) +Aug 24 11:51:37.567: INFO: (11) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 31.992134ms) +Aug 24 11:51:37.567: INFO: (11) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 34.156377ms) +Aug 24 11:51:37.569: INFO: (11) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 34.095838ms) +Aug 24 11:51:37.570: INFO: (11) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 36.928613ms) +Aug 24 11:51:37.571: INFO: (11) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 36.714517ms) +Aug 24 11:51:37.572: INFO: (11) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 38.537793ms) +Aug 24 11:51:37.585: INFO: (12) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 12.090371ms) +Aug 24 11:51:37.587: INFO: (12) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 14.090782ms) +Aug 24 11:51:37.590: INFO: (12) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 16.88185ms) +Aug 24 11:51:37.591: INFO: (12) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 17.591153ms) +Aug 24 11:51:37.591: INFO: (12) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 17.817852ms) +Aug 24 11:51:37.592: INFO: (12) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 19.293253ms) +Aug 24 11:51:37.592: INFO: (12) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 18.331589ms) +Aug 24 11:51:37.596: INFO: (12) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 21.828908ms) +Aug 24 11:51:37.596: INFO: (12) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 22.86005ms) +Aug 24 11:51:37.596: INFO: (12) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 22.666103ms) +Aug 24 11:51:37.599: INFO: (12) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 25.078056ms) +Aug 24 11:51:37.601: INFO: (12) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... 
(200; 33.613248ms) +Aug 24 11:51:37.622: INFO: (13) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 13.820007ms) +Aug 24 11:51:37.626: INFO: (13) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 17.091822ms) +Aug 24 11:51:37.626: INFO: (13) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 27.437282ms) +Aug 24 11:51:37.637: INFO: (13) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 27.937118ms) +Aug 24 11:51:37.637: INFO: (13) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 28.614409ms) +Aug 24 11:51:37.637: INFO: (13) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 28.895745ms) +Aug 24 11:51:37.637: INFO: (13) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 28.626462ms) +Aug 24 11:51:37.638: INFO: (13) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 29.407067ms) +Aug 24 11:51:37.638: INFO: (13) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 30.270699ms) +Aug 24 11:51:37.639: INFO: (13) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 30.101543ms) +Aug 24 11:51:37.639: INFO: (13) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 30.72754ms) +Aug 24 11:51:37.650: INFO: (14) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 10.364019ms) +Aug 24 11:51:37.651: INFO: (14) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 11.109575ms) +Aug 24 11:51:37.652: INFO: (14) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 11.972416ms) +Aug 24 11:51:37.658: INFO: (14) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 18.147397ms) +Aug 24 11:51:37.659: INFO: (14) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 19.440583ms) +Aug 24 11:51:37.661: INFO: (14) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 20.392691ms) +Aug 24 11:51:37.665: INFO: (14) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 25.222153ms) +Aug 24 11:51:37.665: INFO: (14) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 25.134679ms) +Aug 24 11:51:37.665: INFO: (14) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 26.17759ms) +Aug 24 11:51:37.665: INFO: (14) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 24.899931ms) +Aug 24 11:51:37.666: INFO: (14) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 26.65573ms) +Aug 24 11:51:37.667: INFO: (14) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 25.720598ms) +Aug 24 11:51:37.668: INFO: (14) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 27.237623ms) +Aug 24 11:51:37.669: INFO: (14) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... 
(200; 20.40674ms) +Aug 24 11:51:37.693: INFO: (15) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 22.040497ms) +Aug 24 11:51:37.693: INFO: (15) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 23.107868ms) +Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 23.27314ms) +Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 23.374256ms) +Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 23.791547ms) +Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 23.138342ms) +Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 22.869505ms) +Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 22.65983ms) +Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 74.688539ms) +Aug 24 11:51:37.778: INFO: (16) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 79.435773ms) +Aug 24 11:51:37.779: INFO: (16) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 79.897017ms) +Aug 24 11:51:37.786: INFO: (16) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 86.61411ms) +Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 87.297453ms) +Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 87.447915ms) +Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 88.249581ms) +Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 88.009258ms) +Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 87.66611ms) +Aug 24 11:51:37.788: INFO: (16) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 88.725693ms) +Aug 24 11:51:37.790: INFO: (16) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 90.322596ms) +Aug 24 11:51:37.791: INFO: (16) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 91.494283ms) +Aug 24 11:51:37.792: INFO: (16) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 92.612271ms) +Aug 24 11:51:37.823: INFO: (17) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 30.261926ms) +Aug 24 11:51:37.827: INFO: (17) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... (200; 76.525487ms) +Aug 24 11:51:37.873: INFO: (17) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 79.967238ms) +Aug 24 11:51:37.878: INFO: (17) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... 
(200; 83.908016ms) +Aug 24 11:51:37.878: INFO: (17) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 86.212052ms) +Aug 24 11:51:37.879: INFO: (17) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 84.4285ms) +Aug 24 11:51:37.879: INFO: (17) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 84.681732ms) +Aug 24 11:51:37.917: INFO: (18) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 36.832525ms) +Aug 24 11:51:37.921: INFO: (18) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 41.415566ms) +Aug 24 11:51:37.921: INFO: (18) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 41.538356ms) +Aug 24 11:51:37.924: INFO: (18) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 43.493345ms) +Aug 24 11:51:37.924: INFO: (18) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 43.79925ms) +Aug 24 11:51:37.925: INFO: (18) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... (200; 22.80696ms) +Aug 24 11:51:37.956: INFO: (19) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 23.431499ms) +Aug 24 11:51:37.960: INFO: (19) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 27.527471ms) +Aug 24 11:51:37.961: INFO: (19) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 26.819525ms) +Aug 24 11:51:37.966: INFO: (19) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 31.777711ms) +Aug 24 11:51:37.967: INFO: (19) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... (200; 35.818124ms) +Aug 24 11:51:37.972: INFO: (19) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 39.074963ms) +Aug 24 11:51:37.972: INFO: (19) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 37.878072ms) +Aug 24 11:51:37.973: INFO: (19) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 40.015063ms) +Aug 24 11:51:37.976: INFO: (19) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 42.566629ms) +STEP: deleting ReplicationController proxy-service-xtkzj in namespace proxy-8717, will wait for the garbage collector to delete the pods 08/24/23 11:51:37.976 +Aug 24 11:51:38.043: INFO: Deleting ReplicationController proxy-service-xtkzj took: 8.744417ms +Aug 24 11:51:38.143: INFO: Terminating ReplicationController proxy-service-xtkzj pods took: 100.890478ms +[AfterEach] version v1 test/e2e/framework/node/init/init.go:32 -Jul 29 15:52:11.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 11:51:39.145: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] version v1 test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] version v1 dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] version v1 tear down framework | framework.go:193 -STEP: Destroying namespace "projected-1695" for this suite. 
07/29/23 15:52:11.709 +STEP: Destroying namespace "proxy-8717" for this suite. 08/24/23 11:51:39.165 ------------------------------ -• [4.231 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should provide container's cpu limit [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:193 +• [SLOW TEST] [6.227 seconds] +[sig-network] Proxy +test/e2e/network/common/framework.go:23 + version v1 + test/e2e/network/proxy.go:74 + should proxy through a service and a pod [Conformance] + test/e2e/network/proxy.go:101 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] version v1 set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:52:07.495 - Jul 29 15:52:07.495: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 15:52:07.5 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:07.539 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:07.55 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 11:51:32.957 + Aug 24 11:51:32.957: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename proxy 08/24/23 11:51:32.963 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:32.999 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:33.007 + [BeforeEach] version v1 test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should provide container's cpu limit [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:193 - STEP: Creating a pod to test downward API volume plugin 07/29/23 15:52:07.557 - Jul 29 15:52:07.577: INFO: Waiting up to 5m0s for pod "downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391" in namespace "projected-1695" to be "Succeeded or Failed" - Jul 29 15:52:07.590: INFO: Pod "downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391": Phase="Pending", Reason="", readiness=false. Elapsed: 12.386313ms - Jul 29 15:52:09.603: INFO: Pod "downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391": Phase="Pending", Reason="", readiness=false. Elapsed: 2.025889544s - Jul 29 15:52:11.604: INFO: Pod "downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.026639118s - STEP: Saw pod success 07/29/23 15:52:11.605 - Jul 29 15:52:11.606: INFO: Pod "downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391" satisfied condition "Succeeded or Failed" - Jul 29 15:52:11.612: INFO: Trying to get logs from node wetuj3nuajog-2 pod downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391 container client-container: - STEP: delete the pod 07/29/23 15:52:11.634 - Jul 29 15:52:11.694: INFO: Waiting for pod downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391 to disappear - Jul 29 15:52:11.700: INFO: Pod downwardapi-volume-4347d9c7-424f-4b8b-a156-4666e74ef391 no longer exists - [AfterEach] [sig-storage] Projected downwardAPI + [It] should proxy through a service and a pod [Conformance] + test/e2e/network/proxy.go:101 + STEP: starting an echo server on multiple ports 08/24/23 11:51:33.045 + STEP: creating replication controller proxy-service-xtkzj in namespace proxy-8717 08/24/23 11:51:33.051 + I0824 11:51:33.069739 14 runners.go:193] Created replication controller with name: proxy-service-xtkzj, namespace: proxy-8717, replica count: 1 + I0824 11:51:34.122105 14 runners.go:193] proxy-service-xtkzj Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady + I0824 11:51:35.122784 14 runners.go:193] proxy-service-xtkzj Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + Aug 24 11:51:35.130: INFO: Endpoint proxy-8717/proxy-service-xtkzj is not ready yet + Aug 24 11:51:37.138: INFO: setup took 4.126570161s, starting test cases + STEP: running 16 cases, 20 attempts per case, 320 total attempts 08/24/23 11:51:37.138 + Aug 24 11:51:37.169: INFO: (0) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 28.522903ms) + Aug 24 11:51:37.169: INFO: (0) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 29.398076ms) + Aug 24 11:51:37.172: INFO: (0) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... 
(200; 30.634616ms) + Aug 24 11:51:37.173: INFO: (0) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 33.824865ms) + Aug 24 11:51:37.173: INFO: (0) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 32.173479ms) + Aug 24 11:51:37.173: INFO: (0) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 32.50786ms) + Aug 24 11:51:37.173: INFO: (0) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 32.042531ms) + Aug 24 11:51:37.178: INFO: (0) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 36.218758ms) + Aug 24 11:51:37.178: INFO: (0) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 37.308887ms) + Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 39.715858ms) + Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 40.088371ms) + Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 39.903441ms) + Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 40.486589ms) + Aug 24 11:51:37.182: INFO: (0) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 42.497437ms) + Aug 24 11:51:37.183: INFO: (0) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 41.635021ms) + Aug 24 11:51:37.184: INFO: (0) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... (200; 25.086291ms) + Aug 24 11:51:37.211: INFO: (1) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 26.613043ms) + Aug 24 11:51:37.212: INFO: (1) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 26.753585ms) + Aug 24 11:51:37.217: INFO: (1) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 31.906206ms) + Aug 24 11:51:37.217: INFO: (1) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 32.873998ms) + Aug 24 11:51:37.219: INFO: (1) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 33.484772ms) + Aug 24 11:51:37.219: INFO: (1) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 34.803439ms) + Aug 24 11:51:37.220: INFO: (1) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 34.594548ms) + Aug 24 11:51:37.221: INFO: (1) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... (200; 36.927147ms) + Aug 24 11:51:37.223: INFO: (1) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 36.886405ms) + Aug 24 11:51:37.224: INFO: (1) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 38.556353ms) + Aug 24 11:51:37.245: INFO: (2) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 20.433121ms) + Aug 24 11:51:37.245: INFO: (2) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... 
(200; 20.303791ms) + Aug 24 11:51:37.247: INFO: (2) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 21.027376ms) + Aug 24 11:51:37.248: INFO: (2) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 23.018981ms) + Aug 24 11:51:37.250: INFO: (2) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 24.96934ms) + Aug 24 11:51:37.250: INFO: (2) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 25.118302ms) + Aug 24 11:51:37.251: INFO: (2) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 24.502841ms) + Aug 24 11:51:37.252: INFO: (2) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 27.460364ms) + Aug 24 11:51:37.252: INFO: (2) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 27.809561ms) + Aug 24 11:51:37.252: INFO: (2) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 33.582068ms) + Aug 24 11:51:37.259: INFO: (2) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 34.048449ms) + Aug 24 11:51:37.259: INFO: (2) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 33.692441ms) + Aug 24 11:51:37.262: INFO: (2) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 36.861813ms) + Aug 24 11:51:37.274: INFO: (3) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 11.337526ms) + Aug 24 11:51:37.275: INFO: (3) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 11.520741ms) + Aug 24 11:51:37.278: INFO: (3) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 15.460722ms) + Aug 24 11:51:37.282: INFO: (3) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 19.336439ms) + Aug 24 11:51:37.282: INFO: (3) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... 
(200; 18.871227ms) + Aug 24 11:51:37.284: INFO: (3) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 20.459922ms) + Aug 24 11:51:37.286: INFO: (3) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 22.808705ms) + Aug 24 11:51:37.287: INFO: (3) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 23.186352ms) + Aug 24 11:51:37.291: INFO: (3) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 27.054353ms) + Aug 24 11:51:37.291: INFO: (3) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 26.940457ms) + Aug 24 11:51:37.291: INFO: (3) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 31.859408ms) + Aug 24 11:51:37.297: INFO: (3) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 33.376647ms) + Aug 24 11:51:37.298: INFO: (3) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 33.805351ms) + Aug 24 11:51:37.299: INFO: (3) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 34.501386ms) + Aug 24 11:51:37.323: INFO: (4) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 23.419651ms) + Aug 24 11:51:37.331: INFO: (4) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... (200; 33.699125ms) + Aug 24 11:51:37.335: INFO: (4) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 32.987935ms) + Aug 24 11:51:37.343: INFO: (4) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 43.537878ms) + Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 45.108472ms) + Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 46.236119ms) + Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 46.690546ms) + Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 46.267377ms) + Aug 24 11:51:37.347: INFO: (4) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 47.467086ms) + Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 47.267427ms) + Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 48.11728ms) + Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 48.114615ms) + Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 48.688187ms) + Aug 24 11:51:37.348: INFO: (4) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 47.160844ms) + Aug 24 11:51:37.364: INFO: (5) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 15.522742ms) + Aug 24 11:51:37.370: INFO: (5) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 20.439746ms) + Aug 24 11:51:37.371: INFO: (5) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... 
(200; 21.79586ms) + Aug 24 11:51:37.373: INFO: (5) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 23.3018ms) + Aug 24 11:51:37.373: INFO: (5) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 22.620725ms) + Aug 24 11:51:37.374: INFO: (5) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 23.823797ms) + Aug 24 11:51:37.380: INFO: (5) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 30.690922ms) + Aug 24 11:51:37.380: INFO: (5) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 29.846445ms) + Aug 24 11:51:37.381: INFO: (5) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 32.052231ms) + Aug 24 11:51:37.381: INFO: (5) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 31.186761ms) + Aug 24 11:51:37.382: INFO: (5) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... (200; 24.489349ms) + Aug 24 11:51:37.410: INFO: (6) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 26.88738ms) + Aug 24 11:51:37.414: INFO: (6) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 26.593402ms) + Aug 24 11:51:37.414: INFO: (6) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 26.997856ms) + Aug 24 11:51:37.414: INFO: (6) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 26.85147ms) + Aug 24 11:51:37.415: INFO: (6) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 27.356661ms) + Aug 24 11:51:37.415: INFO: (6) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 27.724933ms) + Aug 24 11:51:37.416: INFO: (6) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 29.225761ms) + Aug 24 11:51:37.417: INFO: (6) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 29.965273ms) + Aug 24 11:51:37.430: INFO: (7) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 12.445567ms) + Aug 24 11:51:37.430: INFO: (7) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 12.5745ms) + Aug 24 11:51:37.436: INFO: (7) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 18.115475ms) + Aug 24 11:51:37.437: INFO: (7) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 19.1481ms) + Aug 24 11:51:37.437: INFO: (7) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 18.849719ms) + Aug 24 11:51:37.437: INFO: (7) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 18.688084ms) + Aug 24 11:51:37.437: INFO: (7) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 14.491022ms) + Aug 24 11:51:37.457: INFO: (8) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... 
(200; 14.29584ms) + Aug 24 11:51:37.458: INFO: (8) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 14.723869ms) + Aug 24 11:51:37.465: INFO: (8) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 21.475991ms) + Aug 24 11:51:37.467: INFO: (8) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 23.534775ms) + Aug 24 11:51:37.467: INFO: (8) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 23.809043ms) + Aug 24 11:51:37.468: INFO: (8) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... (200; 28.085565ms) + Aug 24 11:51:37.472: INFO: (8) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 29.256385ms) + Aug 24 11:51:37.472: INFO: (8) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 28.519903ms) + Aug 24 11:51:37.474: INFO: (8) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 31.270202ms) + Aug 24 11:51:37.475: INFO: (8) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 31.219872ms) + Aug 24 11:51:37.475: INFO: (8) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 31.075299ms) + Aug 24 11:51:37.488: INFO: (9) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 13.32175ms) + Aug 24 11:51:37.489: INFO: (9) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 12.983146ms) + Aug 24 11:51:37.491: INFO: (9) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 15.909536ms) + Aug 24 11:51:37.496: INFO: (9) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 20.757405ms) + Aug 24 11:51:37.499: INFO: (9) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 22.628104ms) + Aug 24 11:51:37.499: INFO: (9) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 23.974649ms) + Aug 24 11:51:37.499: INFO: (9) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 23.795444ms) + Aug 24 11:51:37.500: INFO: (9) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 23.92096ms) + Aug 24 11:51:37.500: INFO: (9) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... 
(200; 24.17641ms) + Aug 24 11:51:37.501: INFO: (9) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 26.219438ms) + Aug 24 11:51:37.502: INFO: (9) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 25.749255ms) + Aug 24 11:51:37.502: INFO: (9) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 27.06246ms) + Aug 24 11:51:37.503: INFO: (9) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 27.673484ms) + Aug 24 11:51:37.503: INFO: (9) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 27.150857ms) + Aug 24 11:51:37.504: INFO: (9) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 27.591189ms) + Aug 24 11:51:37.520: INFO: (10) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 16.125613ms) + Aug 24 11:51:37.522: INFO: (10) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 17.650153ms) + Aug 24 11:51:37.522: INFO: (10) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 17.864148ms) + Aug 24 11:51:37.524: INFO: (10) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 19.388775ms) + Aug 24 11:51:37.525: INFO: (10) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 20.259796ms) + Aug 24 11:51:37.527: INFO: (10) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 24.204708ms) + Aug 24 11:51:37.530: INFO: (10) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 24.628524ms) + Aug 24 11:51:37.530: INFO: (10) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 24.963742ms) + Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 26.423323ms) + Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 26.467246ms) + Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 26.471007ms) + Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 26.814154ms) + Aug 24 11:51:37.531: INFO: (10) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 26.170129ms) + Aug 24 11:51:37.532: INFO: (10) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 27.318892ms) + Aug 24 11:51:37.552: INFO: (11) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 19.412225ms) + Aug 24 11:51:37.553: INFO: (11) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 20.378284ms) + Aug 24 11:51:37.558: INFO: (11) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... 
(200; 24.662806ms) + Aug 24 11:51:37.561: INFO: (11) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 27.645139ms) + Aug 24 11:51:37.564: INFO: (11) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 30.470394ms) + Aug 24 11:51:37.565: INFO: (11) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 30.58965ms) + Aug 24 11:51:37.566: INFO: (11) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 33.409102ms) + Aug 24 11:51:37.566: INFO: (11) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 32.544232ms) + Aug 24 11:51:37.567: INFO: (11) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 31.992134ms) + Aug 24 11:51:37.567: INFO: (11) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 34.156377ms) + Aug 24 11:51:37.569: INFO: (11) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 34.095838ms) + Aug 24 11:51:37.570: INFO: (11) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 36.928613ms) + Aug 24 11:51:37.571: INFO: (11) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 36.714517ms) + Aug 24 11:51:37.572: INFO: (11) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 38.537793ms) + Aug 24 11:51:37.585: INFO: (12) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 12.090371ms) + Aug 24 11:51:37.587: INFO: (12) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 14.090782ms) + Aug 24 11:51:37.590: INFO: (12) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 16.88185ms) + Aug 24 11:51:37.591: INFO: (12) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 17.591153ms) + Aug 24 11:51:37.591: INFO: (12) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 17.817852ms) + Aug 24 11:51:37.592: INFO: (12) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 19.293253ms) + Aug 24 11:51:37.592: INFO: (12) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 18.331589ms) + Aug 24 11:51:37.596: INFO: (12) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 21.828908ms) + Aug 24 11:51:37.596: INFO: (12) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 22.86005ms) + Aug 24 11:51:37.596: INFO: (12) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 22.666103ms) + Aug 24 11:51:37.599: INFO: (12) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 25.078056ms) + Aug 24 11:51:37.601: INFO: (12) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... 
(200; 33.613248ms) + Aug 24 11:51:37.622: INFO: (13) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 13.820007ms) + Aug 24 11:51:37.626: INFO: (13) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 17.091822ms) + Aug 24 11:51:37.626: INFO: (13) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 27.437282ms) + Aug 24 11:51:37.637: INFO: (13) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 27.937118ms) + Aug 24 11:51:37.637: INFO: (13) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 28.614409ms) + Aug 24 11:51:37.637: INFO: (13) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 28.895745ms) + Aug 24 11:51:37.637: INFO: (13) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 28.626462ms) + Aug 24 11:51:37.638: INFO: (13) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 29.407067ms) + Aug 24 11:51:37.638: INFO: (13) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 30.270699ms) + Aug 24 11:51:37.639: INFO: (13) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 30.101543ms) + Aug 24 11:51:37.639: INFO: (13) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 30.72754ms) + Aug 24 11:51:37.650: INFO: (14) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 10.364019ms) + Aug 24 11:51:37.651: INFO: (14) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 11.109575ms) + Aug 24 11:51:37.652: INFO: (14) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 11.972416ms) + Aug 24 11:51:37.658: INFO: (14) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 18.147397ms) + Aug 24 11:51:37.659: INFO: (14) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 19.440583ms) + Aug 24 11:51:37.661: INFO: (14) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 20.392691ms) + Aug 24 11:51:37.665: INFO: (14) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 25.222153ms) + Aug 24 11:51:37.665: INFO: (14) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 25.134679ms) + Aug 24 11:51:37.665: INFO: (14) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 26.17759ms) + Aug 24 11:51:37.665: INFO: (14) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 24.899931ms) + Aug 24 11:51:37.666: INFO: (14) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 26.65573ms) + Aug 24 11:51:37.667: INFO: (14) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 25.720598ms) + Aug 24 11:51:37.668: INFO: (14) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 27.237623ms) + Aug 24 11:51:37.669: INFO: (14) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... 
(200; 20.40674ms) + Aug 24 11:51:37.693: INFO: (15) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 22.040497ms) + Aug 24 11:51:37.693: INFO: (15) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 23.107868ms) + Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 23.27314ms) + Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 23.374256ms) + Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 23.791547ms) + Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 23.138342ms) + Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 22.869505ms) + Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 22.65983ms) + Aug 24 11:51:37.694: INFO: (15) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test (200; 74.688539ms) + Aug 24 11:51:37.778: INFO: (16) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname2/proxy/: bar (200; 79.435773ms) + Aug 24 11:51:37.779: INFO: (16) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 79.897017ms) + Aug 24 11:51:37.786: INFO: (16) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:462/proxy/: tls qux (200; 86.61411ms) + Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 87.297453ms) + Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 87.447915ms) + Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:460/proxy/: tls baz (200; 88.249581ms) + Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 88.009258ms) + Aug 24 11:51:37.787: INFO: (16) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 87.66611ms) + Aug 24 11:51:37.788: INFO: (16) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 88.725693ms) + Aug 24 11:51:37.790: INFO: (16) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 90.322596ms) + Aug 24 11:51:37.791: INFO: (16) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 91.494283ms) + Aug 24 11:51:37.792: INFO: (16) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 92.612271ms) + Aug 24 11:51:37.823: INFO: (17) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 30.261926ms) + Aug 24 11:51:37.827: INFO: (17) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... (200; 76.525487ms) + Aug 24 11:51:37.873: INFO: (17) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 79.967238ms) + Aug 24 11:51:37.878: INFO: (17) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... 
(200; 83.908016ms) + Aug 24 11:51:37.878: INFO: (17) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 86.212052ms) + Aug 24 11:51:37.879: INFO: (17) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 84.4285ms) + Aug 24 11:51:37.879: INFO: (17) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 84.681732ms) + Aug 24 11:51:37.917: INFO: (18) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 36.832525ms) + Aug 24 11:51:37.921: INFO: (18) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 41.415566ms) + Aug 24 11:51:37.921: INFO: (18) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:1080/proxy/: ... (200; 41.538356ms) + Aug 24 11:51:37.924: INFO: (18) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:1080/proxy/: test<... (200; 43.493345ms) + Aug 24 11:51:37.924: INFO: (18) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 43.79925ms) + Aug 24 11:51:37.925: INFO: (18) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: ... (200; 22.80696ms) + Aug 24 11:51:37.956: INFO: (19) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7/proxy/: test (200; 23.431499ms) + Aug 24 11:51:37.960: INFO: (19) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname2/proxy/: bar (200; 27.527471ms) + Aug 24 11:51:37.961: INFO: (19) /api/v1/namespaces/proxy-8717/pods/http:proxy-service-xtkzj-7ftp7:162/proxy/: bar (200; 26.819525ms) + Aug 24 11:51:37.966: INFO: (19) /api/v1/namespaces/proxy-8717/services/proxy-service-xtkzj:portname1/proxy/: foo (200; 31.777711ms) + Aug 24 11:51:37.967: INFO: (19) /api/v1/namespaces/proxy-8717/pods/https:proxy-service-xtkzj-7ftp7:443/proxy/: test<... (200; 35.818124ms) + Aug 24 11:51:37.972: INFO: (19) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname2/proxy/: tls qux (200; 39.074963ms) + Aug 24 11:51:37.972: INFO: (19) /api/v1/namespaces/proxy-8717/pods/proxy-service-xtkzj-7ftp7:160/proxy/: foo (200; 37.878072ms) + Aug 24 11:51:37.973: INFO: (19) /api/v1/namespaces/proxy-8717/services/http:proxy-service-xtkzj:portname1/proxy/: foo (200; 40.015063ms) + Aug 24 11:51:37.976: INFO: (19) /api/v1/namespaces/proxy-8717/services/https:proxy-service-xtkzj:tlsportname1/proxy/: tls baz (200; 42.566629ms) + STEP: deleting ReplicationController proxy-service-xtkzj in namespace proxy-8717, will wait for the garbage collector to delete the pods 08/24/23 11:51:37.976 + Aug 24 11:51:38.043: INFO: Deleting ReplicationController proxy-service-xtkzj took: 8.744417ms + Aug 24 11:51:38.143: INFO: Terminating ReplicationController proxy-service-xtkzj pods took: 100.890478ms + [AfterEach] version v1 test/e2e/framework/node/init/init.go:32 - Jul 29 15:52:11.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 11:51:39.145: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] version v1 test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] version v1 dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] version v1 tear down framework | framework.go:193 - STEP: Destroying namespace "projected-1695" for this suite. 
07/29/23 15:52:11.709 + STEP: Destroying namespace "proxy-8717" for this suite. 08/24/23 11:51:39.165 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSSSSSSS ------------------------------ -[sig-node] Variable Expansion - should succeed in writing subpaths in container [Slow] [Conformance] - test/e2e/common/node/expansion.go:297 -[BeforeEach] [sig-node] Variable Expansion +[sig-storage] Subpath Atomic writer volumes + should support subpaths with downward pod [Conformance] + test/e2e/storage/subpath.go:92 +[BeforeEach] [sig-storage] Subpath set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:52:11.727 -Jul 29 15:52:11.727: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename var-expansion 07/29/23 15:52:11.731 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:11.761 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:11.766 -[BeforeEach] [sig-node] Variable Expansion +STEP: Creating a kubernetes client 08/24/23 11:51:39.186 +Aug 24 11:51:39.187: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename subpath 08/24/23 11:51:39.188 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:39.23 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:39.24 +[BeforeEach] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:31 -[It] should succeed in writing subpaths in container [Slow] [Conformance] - test/e2e/common/node/expansion.go:297 -STEP: creating the pod 07/29/23 15:52:11.772 -STEP: waiting for pod running 07/29/23 15:52:11.793 -Jul 29 15:52:11.793: INFO: Waiting up to 2m0s for pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" in namespace "var-expansion-6662" to be "running" -Jul 29 15:52:11.802: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a": Phase="Pending", Reason="", readiness=false. Elapsed: 8.646034ms -Jul 29 15:52:13.814: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.021020255s -Jul 29 15:52:13.814: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" satisfied condition "running" -STEP: creating a file in subpath 07/29/23 15:52:13.814 -Jul 29 15:52:13.824: INFO: ExecWithOptions {Command:[/bin/sh -c touch /volume_mount/mypath/foo/test.log] Namespace:var-expansion-6662 PodName:var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 15:52:13.824: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 15:52:13.826: INFO: ExecWithOptions: Clientset creation -Jul 29 15:52:13.826: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/var-expansion-6662/pods/var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a/exec?command=%2Fbin%2Fsh&command=-c&command=touch+%2Fvolume_mount%2Fmypath%2Ffoo%2Ftest.log&container=dapi-container&container=dapi-container&stderr=true&stdout=true) -STEP: test for file in mounted path 07/29/23 15:52:13.91 -Jul 29 15:52:13.920: INFO: ExecWithOptions {Command:[/bin/sh -c test -f /subpath_mount/test.log] Namespace:var-expansion-6662 PodName:var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 15:52:13.920: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 15:52:13.922: INFO: ExecWithOptions: Clientset creation -Jul 29 15:52:13.923: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/var-expansion-6662/pods/var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a/exec?command=%2Fbin%2Fsh&command=-c&command=test+-f+%2Fsubpath_mount%2Ftest.log&container=dapi-container&container=dapi-container&stderr=true&stdout=true) -STEP: updating the annotation value 07/29/23 15:52:14.031 -Jul 29 15:52:14.556: INFO: Successfully updated pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" -STEP: waiting for annotated pod running 07/29/23 15:52:14.556 -Jul 29 15:52:14.556: INFO: Waiting up to 2m0s for pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" in namespace "var-expansion-6662" to be "running" -Jul 29 15:52:14.564: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a": Phase="Running", Reason="", readiness=true. Elapsed: 7.219733ms -Jul 29 15:52:14.564: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" satisfied condition "running" -STEP: deleting the pod gracefully 07/29/23 15:52:14.564 -Jul 29 15:52:14.564: INFO: Deleting pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" in namespace "var-expansion-6662" -Jul 29 15:52:14.576: INFO: Wait up to 5m0s for pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" to be fully deleted -[AfterEach] [sig-node] Variable Expansion +[BeforeEach] Atomic writer volumes + test/e2e/storage/subpath.go:40 +STEP: Setting up data 08/24/23 11:51:39.249 +[It] should support subpaths with downward pod [Conformance] + test/e2e/storage/subpath.go:92 +STEP: Creating pod pod-subpath-test-downwardapi-z7xc 08/24/23 11:51:39.275 +STEP: Creating a pod to test atomic-volume-subpath 08/24/23 11:51:39.275 +Aug 24 11:51:39.296: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-z7xc" in namespace "subpath-8477" to be "Succeeded or Failed" +Aug 24 11:51:39.303: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Pending", Reason="", readiness=false. Elapsed: 6.596056ms +Aug 24 11:51:41.311: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.014441883s +Aug 24 11:51:43.311: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 4.014578363s +Aug 24 11:51:45.310: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 6.013547541s +Aug 24 11:51:47.309: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 8.012016548s +Aug 24 11:51:49.310: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 10.013934123s +Aug 24 11:51:51.308: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 12.011568026s +Aug 24 11:51:53.310: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 14.013809209s +Aug 24 11:51:55.312: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 16.015433365s +Aug 24 11:51:57.311: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 18.014939949s +Aug 24 11:51:59.312: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 20.015222149s +Aug 24 11:52:01.312: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=false. Elapsed: 22.015429004s +Aug 24 11:52:03.312: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.015505216s +STEP: Saw pod success 08/24/23 11:52:03.312 +Aug 24 11:52:03.313: INFO: Pod "pod-subpath-test-downwardapi-z7xc" satisfied condition "Succeeded or Failed" +Aug 24 11:52:03.323: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-downwardapi-z7xc container test-container-subpath-downwardapi-z7xc: +STEP: delete the pod 08/24/23 11:52:03.339 +Aug 24 11:52:03.365: INFO: Waiting for pod pod-subpath-test-downwardapi-z7xc to disappear +Aug 24 11:52:03.378: INFO: Pod pod-subpath-test-downwardapi-z7xc no longer exists +STEP: Deleting pod pod-subpath-test-downwardapi-z7xc 08/24/23 11:52:03.378 +Aug 24 11:52:03.378: INFO: Deleting pod "pod-subpath-test-downwardapi-z7xc" in namespace "subpath-8477" +[AfterEach] [sig-storage] Subpath test/e2e/framework/node/init/init.go:32 -Jul 29 15:52:48.588: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Variable Expansion +Aug 24 11:52:03.385: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Subpath test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-storage] Subpath dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-storage] Subpath tear down framework | framework.go:193 -STEP: Destroying namespace "var-expansion-6662" for this suite. 07/29/23 15:52:48.596 +STEP: Destroying namespace "subpath-8477" for this suite. 
08/24/23 11:52:03.394 ------------------------------ -• [SLOW TEST] [36.882 seconds] -[sig-node] Variable Expansion +• [SLOW TEST] [24.234 seconds] +[sig-storage] Subpath +test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + test/e2e/storage/subpath.go:36 + should support subpaths with downward pod [Conformance] + test/e2e/storage/subpath.go:92 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-storage] Subpath + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 11:51:39.186 + Aug 24 11:51:39.187: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename subpath 08/24/23 11:51:39.188 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:51:39.23 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:51:39.24 + [BeforeEach] [sig-storage] Subpath + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] Atomic writer volumes + test/e2e/storage/subpath.go:40 + STEP: Setting up data 08/24/23 11:51:39.249 + [It] should support subpaths with downward pod [Conformance] + test/e2e/storage/subpath.go:92 + STEP: Creating pod pod-subpath-test-downwardapi-z7xc 08/24/23 11:51:39.275 + STEP: Creating a pod to test atomic-volume-subpath 08/24/23 11:51:39.275 + Aug 24 11:51:39.296: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-z7xc" in namespace "subpath-8477" to be "Succeeded or Failed" + Aug 24 11:51:39.303: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Pending", Reason="", readiness=false. Elapsed: 6.596056ms + Aug 24 11:51:41.311: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 2.014441883s + Aug 24 11:51:43.311: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 4.014578363s + Aug 24 11:51:45.310: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 6.013547541s + Aug 24 11:51:47.309: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 8.012016548s + Aug 24 11:51:49.310: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 10.013934123s + Aug 24 11:51:51.308: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 12.011568026s + Aug 24 11:51:53.310: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 14.013809209s + Aug 24 11:51:55.312: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 16.015433365s + Aug 24 11:51:57.311: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 18.014939949s + Aug 24 11:51:59.312: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=true. Elapsed: 20.015222149s + Aug 24 11:52:01.312: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Running", Reason="", readiness=false. Elapsed: 22.015429004s + Aug 24 11:52:03.312: INFO: Pod "pod-subpath-test-downwardapi-z7xc": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 24.015505216s + STEP: Saw pod success 08/24/23 11:52:03.312 + Aug 24 11:52:03.313: INFO: Pod "pod-subpath-test-downwardapi-z7xc" satisfied condition "Succeeded or Failed" + Aug 24 11:52:03.323: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-downwardapi-z7xc container test-container-subpath-downwardapi-z7xc: + STEP: delete the pod 08/24/23 11:52:03.339 + Aug 24 11:52:03.365: INFO: Waiting for pod pod-subpath-test-downwardapi-z7xc to disappear + Aug 24 11:52:03.378: INFO: Pod pod-subpath-test-downwardapi-z7xc no longer exists + STEP: Deleting pod pod-subpath-test-downwardapi-z7xc 08/24/23 11:52:03.378 + Aug 24 11:52:03.378: INFO: Deleting pod "pod-subpath-test-downwardapi-z7xc" in namespace "subpath-8477" + [AfterEach] [sig-storage] Subpath + test/e2e/framework/node/init/init.go:32 + Aug 24 11:52:03.385: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Subpath + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-storage] Subpath + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-storage] Subpath + tear down framework | framework.go:193 + STEP: Destroying namespace "subpath-8477" for this suite. 08/24/23 11:52:03.394 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Probing container + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:215 +[BeforeEach] [sig-node] Probing container + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 11:52:03.429 +Aug 24 11:52:03.429: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-probe 08/24/23 11:52:03.431 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:52:03.462 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:52:03.468 +[BeforeEach] [sig-node] Probing container + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 +[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:215 +STEP: Creating pod test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2 in namespace container-probe-38 08/24/23 11:52:03.473 +Aug 24 11:52:03.491: INFO: Waiting up to 5m0s for pod "test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2" in namespace "container-probe-38" to be "not pending" +Aug 24 11:52:03.510: INFO: Pod "test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2": Phase="Pending", Reason="", readiness=false. Elapsed: 19.234607ms +Aug 24 11:52:05.517: INFO: Pod "test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.025845522s +Aug 24 11:52:05.517: INFO: Pod "test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2" satisfied condition "not pending" +Aug 24 11:52:05.517: INFO: Started pod test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2 in namespace container-probe-38 +STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 11:52:05.517 +Aug 24 11:52:05.521: INFO: Initial restart count of pod test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2 is 0 +STEP: deleting the pod 08/24/23 11:56:06.659 +[AfterEach] [sig-node] Probing container + test/e2e/framework/node/init/init.go:32 +Aug 24 11:56:06.680: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Probing container + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-node] Probing container + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-node] Probing container + tear down framework | framework.go:193 +STEP: Destroying namespace "container-probe-38" for this suite. 08/24/23 11:56:06.698 +------------------------------ +• [SLOW TEST] [243.281 seconds] +[sig-node] Probing container test/e2e/common/node/framework.go:23 - should succeed in writing subpaths in container [Slow] [Conformance] - test/e2e/common/node/expansion.go:297 + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:215 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Variable Expansion + [BeforeEach] [sig-node] Probing container set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:52:11.727 - Jul 29 15:52:11.727: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename var-expansion 07/29/23 15:52:11.731 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:11.761 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:11.766 - [BeforeEach] [sig-node] Variable Expansion + STEP: Creating a kubernetes client 08/24/23 11:52:03.429 + Aug 24 11:52:03.429: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-probe 08/24/23 11:52:03.431 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:52:03.462 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:52:03.468 + [BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 - [It] should succeed in writing subpaths in container [Slow] [Conformance] - test/e2e/common/node/expansion.go:297 - STEP: creating the pod 07/29/23 15:52:11.772 - STEP: waiting for pod running 07/29/23 15:52:11.793 - Jul 29 15:52:11.793: INFO: Waiting up to 2m0s for pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" in namespace "var-expansion-6662" to be "running" - Jul 29 15:52:11.802: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a": Phase="Pending", Reason="", readiness=false. Elapsed: 8.646034ms - Jul 29 15:52:13.814: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.021020255s - Jul 29 15:52:13.814: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" satisfied condition "running" - STEP: creating a file in subpath 07/29/23 15:52:13.814 - Jul 29 15:52:13.824: INFO: ExecWithOptions {Command:[/bin/sh -c touch /volume_mount/mypath/foo/test.log] Namespace:var-expansion-6662 PodName:var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 15:52:13.824: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 15:52:13.826: INFO: ExecWithOptions: Clientset creation - Jul 29 15:52:13.826: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/var-expansion-6662/pods/var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a/exec?command=%2Fbin%2Fsh&command=-c&command=touch+%2Fvolume_mount%2Fmypath%2Ffoo%2Ftest.log&container=dapi-container&container=dapi-container&stderr=true&stdout=true) - STEP: test for file in mounted path 07/29/23 15:52:13.91 - Jul 29 15:52:13.920: INFO: ExecWithOptions {Command:[/bin/sh -c test -f /subpath_mount/test.log] Namespace:var-expansion-6662 PodName:var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 15:52:13.920: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 15:52:13.922: INFO: ExecWithOptions: Clientset creation - Jul 29 15:52:13.923: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/var-expansion-6662/pods/var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a/exec?command=%2Fbin%2Fsh&command=-c&command=test+-f+%2Fsubpath_mount%2Ftest.log&container=dapi-container&container=dapi-container&stderr=true&stdout=true) - STEP: updating the annotation value 07/29/23 15:52:14.031 - Jul 29 15:52:14.556: INFO: Successfully updated pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" - STEP: waiting for annotated pod running 07/29/23 15:52:14.556 - Jul 29 15:52:14.556: INFO: Waiting up to 2m0s for pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" in namespace "var-expansion-6662" to be "running" - Jul 29 15:52:14.564: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a": Phase="Running", Reason="", readiness=true. Elapsed: 7.219733ms - Jul 29 15:52:14.564: INFO: Pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" satisfied condition "running" - STEP: deleting the pod gracefully 07/29/23 15:52:14.564 - Jul 29 15:52:14.564: INFO: Deleting pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" in namespace "var-expansion-6662" - Jul 29 15:52:14.576: INFO: Wait up to 5m0s for pod "var-expansion-d3f656d5-c9be-4b25-ba3d-f096be9f3a2a" to be fully deleted - [AfterEach] [sig-node] Variable Expansion + [BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 + [It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:215 + STEP: Creating pod test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2 in namespace container-probe-38 08/24/23 11:52:03.473 + Aug 24 11:52:03.491: INFO: Waiting up to 5m0s for pod "test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2" in namespace "container-probe-38" to be "not pending" + Aug 24 11:52:03.510: INFO: Pod "test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 19.234607ms + Aug 24 11:52:05.517: INFO: Pod "test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2": Phase="Running", Reason="", readiness=true. Elapsed: 2.025845522s + Aug 24 11:52:05.517: INFO: Pod "test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2" satisfied condition "not pending" + Aug 24 11:52:05.517: INFO: Started pod test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2 in namespace container-probe-38 + STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 11:52:05.517 + Aug 24 11:52:05.521: INFO: Initial restart count of pod test-webserver-6d5318e9-1cea-4a4a-8b1b-a0913068c4d2 is 0 + STEP: deleting the pod 08/24/23 11:56:06.659 + [AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 - Jul 29 15:52:48.588: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Variable Expansion + Aug 24 11:56:06.680: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 - STEP: Destroying namespace "var-expansion-6662" for this suite. 07/29/23 15:52:48.596 + STEP: Destroying namespace "container-probe-38" for this suite. 08/24/23 11:56:06.698 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Guestbook application - should create and stop a working application [Conformance] - test/e2e/kubectl/kubectl.go:394 -[BeforeEach] [sig-cli] Kubectl client +[sig-api-machinery] Garbage collector + should delete RS created by deployment when not orphaning [Conformance] + test/e2e/apimachinery/garbage_collector.go:491 +[BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:52:48.612 -Jul 29 15:52:48.613: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 15:52:48.616 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:48.651 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:48.655 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 11:56:06.712 +Aug 24 11:56:06.712: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename gc 08/24/23 11:56:06.724 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:56:06.764 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:56:06.773 +[BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[It] should create and stop a working application [Conformance] - test/e2e/kubectl/kubectl.go:394 -STEP: creating all guestbook components 07/29/23 15:52:48.66 -Jul 29 15:52:48.660: INFO: apiVersion: v1 -kind: Service -metadata: - name: agnhost-replica - labels: - app: agnhost - role: replica - tier: backend -spec: - ports: - - port: 6379 - selector: - app: agnhost - role: replica - tier: backend +[It] should delete RS created by deployment 
when not orphaning [Conformance] + test/e2e/apimachinery/garbage_collector.go:491 +STEP: create the deployment 08/24/23 11:56:06.784 +STEP: Wait for the Deployment to create new ReplicaSet 08/24/23 11:56:06.795 +STEP: delete the deployment 08/24/23 11:56:07.317 +STEP: wait for all rs to be garbage collected 08/24/23 11:56:07.328 +STEP: expected 0 rs, got 1 rs 08/24/23 11:56:07.339 +STEP: expected 0 pods, got 2 pods 08/24/23 11:56:07.361 +STEP: Gathering metrics 08/24/23 11:56:07.889 +Aug 24 11:56:07.943: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" +Aug 24 11:56:07.949: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. Elapsed: 6.047528ms +Aug 24 11:56:07.949: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) +Aug 24 11:56:07.949: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" +Aug 24 11:56:08.072: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: -Jul 29 15:52:48.661: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' -Jul 29 15:52:49.166: INFO: stderr: "" -Jul 29 15:52:49.166: INFO: stdout: "service/agnhost-replica created\n" -Jul 29 15:52:49.166: INFO: apiVersion: v1 -kind: Service -metadata: - name: agnhost-primary - labels: - app: agnhost - role: primary - tier: backend -spec: - ports: - - port: 6379 - targetPort: 6379 - selector: - app: agnhost - role: primary - tier: backend - -Jul 29 15:52:49.166: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' -Jul 29 15:52:49.772: INFO: stderr: "" -Jul 29 15:52:49.772: INFO: stdout: "service/agnhost-primary created\n" -Jul 29 15:52:49.772: INFO: apiVersion: v1 -kind: Service -metadata: - name: frontend - labels: - app: guestbook - tier: frontend -spec: - # if your cluster supports it, uncomment the following to automatically create - # an external load-balanced IP for the frontend service. 
- # type: LoadBalancer - ports: - - port: 80 - selector: - app: guestbook - tier: frontend - -Jul 29 15:52:49.772: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' -Jul 29 15:52:50.181: INFO: stderr: "" -Jul 29 15:52:50.182: INFO: stdout: "service/frontend created\n" -Jul 29 15:52:50.182: INFO: apiVersion: apps/v1 -kind: Deployment -metadata: - name: frontend -spec: - replicas: 3 - selector: - matchLabels: - app: guestbook - tier: frontend - template: - metadata: - labels: - app: guestbook - tier: frontend - spec: - containers: - - name: guestbook-frontend - image: registry.k8s.io/e2e-test-images/agnhost:2.43 - args: [ "guestbook", "--backend-port", "6379" ] - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 80 - -Jul 29 15:52:50.183: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' -Jul 29 15:52:50.733: INFO: stderr: "" -Jul 29 15:52:50.734: INFO: stdout: "deployment.apps/frontend created\n" -Jul 29 15:52:50.734: INFO: apiVersion: apps/v1 -kind: Deployment -metadata: - name: agnhost-primary -spec: - replicas: 1 - selector: - matchLabels: - app: agnhost - role: primary - tier: backend - template: - metadata: - labels: - app: agnhost - role: primary - tier: backend - spec: - containers: - - name: primary - image: registry.k8s.io/e2e-test-images/agnhost:2.43 - args: [ "guestbook", "--http-port", "6379" ] - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 6379 - -Jul 29 15:52:50.734: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' -Jul 29 15:52:51.375: INFO: stderr: "" -Jul 29 15:52:51.375: INFO: stdout: "deployment.apps/agnhost-primary created\n" -Jul 29 15:52:51.375: INFO: apiVersion: apps/v1 -kind: Deployment -metadata: - name: agnhost-replica -spec: - replicas: 2 - selector: - matchLabels: - app: agnhost - role: replica - tier: backend - template: - metadata: - labels: - app: agnhost - role: replica - tier: backend - spec: - containers: - - name: replica - image: registry.k8s.io/e2e-test-images/agnhost:2.43 - args: [ "guestbook", "--replicaof", "agnhost-primary", "--http-port", "6379" ] - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 6379 - -Jul 29 15:52:51.390: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' -Jul 29 15:52:52.522: INFO: stderr: "" -Jul 29 15:52:52.523: INFO: stdout: "deployment.apps/agnhost-replica created\n" -STEP: validating guestbook app 07/29/23 15:52:52.523 -Jul 29 15:52:52.523: INFO: Waiting for all frontend pods to be Running. -Jul 29 15:52:57.574: INFO: Waiting for frontend to serve content. -Jul 29 15:52:57.605: INFO: Trying to add a new entry to the guestbook. -Jul 29 15:52:57.638: INFO: Verifying that added entry can be retrieved. -STEP: using delete to clean up resources 07/29/23 15:52:57.665 -Jul 29 15:52:57.667: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' -Jul 29 15:52:57.911: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" -Jul 29 15:52:57.911: INFO: stdout: "service \"agnhost-replica\" force deleted\n" -STEP: using delete to clean up resources 07/29/23 15:52:57.911 -Jul 29 15:52:57.912: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' -Jul 29 15:52:58.173: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Jul 29 15:52:58.173: INFO: stdout: "service \"agnhost-primary\" force deleted\n" -STEP: using delete to clean up resources 07/29/23 15:52:58.173 -Jul 29 15:52:58.177: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' -Jul 29 15:52:58.405: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Jul 29 15:52:58.405: INFO: stdout: "service \"frontend\" force deleted\n" -STEP: using delete to clean up resources 07/29/23 15:52:58.406 -Jul 29 15:52:58.406: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' -Jul 29 15:52:58.544: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Jul 29 15:52:58.544: INFO: stdout: "deployment.apps \"frontend\" force deleted\n" -STEP: using delete to clean up resources 07/29/23 15:52:58.544 -Jul 29 15:52:58.544: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' -Jul 29 15:52:58.766: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Jul 29 15:52:58.766: INFO: stdout: "deployment.apps \"agnhost-primary\" force deleted\n" -STEP: using delete to clean up resources 07/29/23 15:52:58.767 -Jul 29 15:52:58.768: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' -Jul 29 15:52:59.090: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Jul 29 15:52:59.090: INFO: stdout: "deployment.apps \"agnhost-replica\" force deleted\n" -[AfterEach] [sig-cli] Kubectl client - test/e2e/framework/node/init/init.go:32 -Jul 29 15:52:59.090: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client - tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-9693" for this suite. 
07/29/23 15:52:59.101 ------------------------------- -• [SLOW TEST] [10.554 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Guestbook application - test/e2e/kubectl/kubectl.go:369 - should create and stop a working application [Conformance] - test/e2e/kubectl/kubectl.go:394 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:52:48.612 - Jul 29 15:52:48.613: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 15:52:48.616 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:48.651 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:48.655 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [It] should create and stop a working application [Conformance] - test/e2e/kubectl/kubectl.go:394 - STEP: creating all guestbook components 07/29/23 15:52:48.66 - Jul 29 15:52:48.660: INFO: apiVersion: v1 - kind: Service - metadata: - name: agnhost-replica - labels: - app: agnhost - role: replica - tier: backend - spec: - ports: - - port: 6379 - selector: - app: agnhost - role: replica - tier: backend - - Jul 29 15:52:48.661: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' - Jul 29 15:52:49.166: INFO: stderr: "" - Jul 29 15:52:49.166: INFO: stdout: "service/agnhost-replica created\n" - Jul 29 15:52:49.166: INFO: apiVersion: v1 - kind: Service - metadata: - name: agnhost-primary - labels: - app: agnhost - role: primary - tier: backend - spec: - ports: - - port: 6379 - targetPort: 6379 - selector: - app: agnhost - role: primary - tier: backend - - Jul 29 15:52:49.166: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' - Jul 29 15:52:49.772: INFO: stderr: "" - Jul 29 15:52:49.772: INFO: stdout: "service/agnhost-primary created\n" - Jul 29 15:52:49.772: INFO: apiVersion: v1 - kind: Service - metadata: - name: frontend - labels: - app: guestbook - tier: frontend - spec: - # if your cluster supports it, uncomment the following to automatically create - # an external load-balanced IP for the frontend service. 
- # type: LoadBalancer - ports: - - port: 80 - selector: - app: guestbook - tier: frontend - - Jul 29 15:52:49.772: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' - Jul 29 15:52:50.181: INFO: stderr: "" - Jul 29 15:52:50.182: INFO: stdout: "service/frontend created\n" - Jul 29 15:52:50.182: INFO: apiVersion: apps/v1 - kind: Deployment - metadata: - name: frontend - spec: - replicas: 3 - selector: - matchLabels: - app: guestbook - tier: frontend - template: - metadata: - labels: - app: guestbook - tier: frontend - spec: - containers: - - name: guestbook-frontend - image: registry.k8s.io/e2e-test-images/agnhost:2.43 - args: [ "guestbook", "--backend-port", "6379" ] - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 80 - - Jul 29 15:52:50.183: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' - Jul 29 15:52:50.733: INFO: stderr: "" - Jul 29 15:52:50.734: INFO: stdout: "deployment.apps/frontend created\n" - Jul 29 15:52:50.734: INFO: apiVersion: apps/v1 - kind: Deployment - metadata: - name: agnhost-primary - spec: - replicas: 1 - selector: - matchLabels: - app: agnhost - role: primary - tier: backend - template: - metadata: - labels: - app: agnhost - role: primary - tier: backend - spec: - containers: - - name: primary - image: registry.k8s.io/e2e-test-images/agnhost:2.43 - args: [ "guestbook", "--http-port", "6379" ] - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 6379 - - Jul 29 15:52:50.734: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' - Jul 29 15:52:51.375: INFO: stderr: "" - Jul 29 15:52:51.375: INFO: stdout: "deployment.apps/agnhost-primary created\n" - Jul 29 15:52:51.375: INFO: apiVersion: apps/v1 - kind: Deployment - metadata: - name: agnhost-replica - spec: - replicas: 2 - selector: - matchLabels: - app: agnhost - role: replica - tier: backend - template: - metadata: - labels: - app: agnhost - role: replica - tier: backend - spec: - containers: - - name: replica - image: registry.k8s.io/e2e-test-images/agnhost:2.43 - args: [ "guestbook", "--replicaof", "agnhost-primary", "--http-port", "6379" ] - resources: - requests: - cpu: 100m - memory: 100Mi - ports: - - containerPort: 6379 - - Jul 29 15:52:51.390: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 create -f -' - Jul 29 15:52:52.522: INFO: stderr: "" - Jul 29 15:52:52.523: INFO: stdout: "deployment.apps/agnhost-replica created\n" - STEP: validating guestbook app 07/29/23 15:52:52.523 - Jul 29 15:52:52.523: INFO: Waiting for all frontend pods to be Running. - Jul 29 15:52:57.574: INFO: Waiting for frontend to serve content. - Jul 29 15:52:57.605: INFO: Trying to add a new entry to the guestbook. - Jul 29 15:52:57.638: INFO: Verifying that added entry can be retrieved. - STEP: using delete to clean up resources 07/29/23 15:52:57.665 - Jul 29 15:52:57.667: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' - Jul 29 15:52:57.911: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" - Jul 29 15:52:57.911: INFO: stdout: "service \"agnhost-replica\" force deleted\n" - STEP: using delete to clean up resources 07/29/23 15:52:57.911 - Jul 29 15:52:57.912: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' - Jul 29 15:52:58.173: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" - Jul 29 15:52:58.173: INFO: stdout: "service \"agnhost-primary\" force deleted\n" - STEP: using delete to clean up resources 07/29/23 15:52:58.173 - Jul 29 15:52:58.177: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' - Jul 29 15:52:58.405: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" - Jul 29 15:52:58.405: INFO: stdout: "service \"frontend\" force deleted\n" - STEP: using delete to clean up resources 07/29/23 15:52:58.406 - Jul 29 15:52:58.406: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' - Jul 29 15:52:58.544: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" - Jul 29 15:52:58.544: INFO: stdout: "deployment.apps \"frontend\" force deleted\n" - STEP: using delete to clean up resources 07/29/23 15:52:58.544 - Jul 29 15:52:58.544: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' - Jul 29 15:52:58.766: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" - Jul 29 15:52:58.766: INFO: stdout: "deployment.apps \"agnhost-primary\" force deleted\n" - STEP: using delete to clean up resources 07/29/23 15:52:58.767 - Jul 29 15:52:58.768: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9693 delete --grace-period=0 --force -f -' - Jul 29 15:52:59.090: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" - Jul 29 15:52:59.090: INFO: stdout: "deployment.apps \"agnhost-replica\" force deleted\n" - [AfterEach] [sig-cli] Kubectl client - test/e2e/framework/node/init/init.go:32 - Jul 29 15:52:59.090: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client - tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-9693" for this suite. 
07/29/23 15:52:59.101 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - patching/updating a validating webhook should work [Conformance] - test/e2e/apimachinery/webhook.go:413 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:52:59.171 -Jul 29 15:52:59.171: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 15:52:59.184 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:59.265 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:59.295 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 15:52:59.422 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 15:52:59.991 -STEP: Deploying the webhook pod 07/29/23 15:53:00.008 -STEP: Wait for the deployment to be ready 07/29/23 15:53:00.033 -Jul 29 15:53:00.058: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -STEP: Deploying the webhook service 07/29/23 15:53:02.078 -STEP: Verifying the service has paired with the endpoint 07/29/23 15:53:02.097 -Jul 29 15:53:03.097: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] patching/updating a validating webhook should work [Conformance] - test/e2e/apimachinery/webhook.go:413 -STEP: Creating a validating webhook configuration 07/29/23 15:53:03.104 -STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 15:53:03.135 -STEP: Updating a validating webhook configuration's rules to not include the create operation 07/29/23 15:53:03.152 -STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 15:53:03.172 -STEP: Patching a validating webhook configuration's rules to include the create operation 07/29/23 15:53:03.189 -STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 15:53:03.201 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 -Jul 29 15:53:03.219: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 11:56:08.072: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-4272" for this suite. 07/29/23 15:53:03.358 -STEP: Destroying namespace "webhook-4272-markers" for this suite. 
07/29/23 15:53:03.38 +STEP: Destroying namespace "gc-8741" for this suite. 08/24/23 11:56:08.083 ------------------------------ -• [4.241 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +• [1.389 seconds] +[sig-api-machinery] Garbage collector test/e2e/apimachinery/framework.go:23 - patching/updating a validating webhook should work [Conformance] - test/e2e/apimachinery/webhook.go:413 + should delete RS created by deployment when not orphaning [Conformance] + test/e2e/apimachinery/garbage_collector.go:491 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:52:59.171 - Jul 29 15:52:59.171: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 15:52:59.184 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:52:59.265 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:52:59.295 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 11:56:06.712 + Aug 24 11:56:06.712: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename gc 08/24/23 11:56:06.724 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:56:06.764 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:56:06.773 + [BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 15:52:59.422 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 15:52:59.991 - STEP: Deploying the webhook pod 07/29/23 15:53:00.008 - STEP: Wait for the deployment to be ready 07/29/23 15:53:00.033 - Jul 29 15:53:00.058: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set - STEP: Deploying the webhook service 07/29/23 15:53:02.078 - STEP: Verifying the service has paired with the endpoint 07/29/23 15:53:02.097 - Jul 29 15:53:03.097: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] patching/updating a validating webhook should work [Conformance] - test/e2e/apimachinery/webhook.go:413 - STEP: Creating a validating webhook configuration 07/29/23 15:53:03.104 - STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 15:53:03.135 - STEP: Updating a validating webhook configuration's rules to not include the create operation 07/29/23 15:53:03.152 - STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 15:53:03.172 - STEP: Patching a validating webhook configuration's rules to include the create operation 07/29/23 15:53:03.189 - STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 15:53:03.201 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [It] should delete RS created by deployment when not orphaning [Conformance] + test/e2e/apimachinery/garbage_collector.go:491 + STEP: create the deployment 08/24/23 11:56:06.784 + STEP: Wait for the Deployment to create new ReplicaSet 08/24/23 11:56:06.795 + STEP: delete the deployment 08/24/23 
11:56:07.317 + STEP: wait for all rs to be garbage collected 08/24/23 11:56:07.328 + STEP: expected 0 rs, got 1 rs 08/24/23 11:56:07.339 + STEP: expected 0 pods, got 2 pods 08/24/23 11:56:07.361 + STEP: Gathering metrics 08/24/23 11:56:07.889 + Aug 24 11:56:07.943: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" + Aug 24 11:56:07.949: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. Elapsed: 6.047528ms + Aug 24 11:56:07.949: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) + Aug 24 11:56:07.949: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" + Aug 24 11:56:08.072: INFO: For apiserver_request_total: + For apiserver_request_latency_seconds: + For apiserver_init_events_total: + For garbage_collector_attempt_to_delete_queue_latency: + For garbage_collector_attempt_to_delete_work_duration: + For garbage_collector_attempt_to_orphan_queue_latency: + For garbage_collector_attempt_to_orphan_work_duration: + For garbage_collector_dirty_processing_latency_microseconds: + For garbage_collector_event_processing_latency_microseconds: + For garbage_collector_graph_changes_queue_latency: + For garbage_collector_graph_changes_work_duration: + For garbage_collector_orphan_processing_latency_microseconds: + For namespace_queue_latency: + For namespace_queue_latency_sum: + For namespace_queue_latency_count: + For namespace_retries: + For namespace_work_duration: + For namespace_work_duration_sum: + For namespace_work_duration_count: + For function_duration_seconds: + For errors_total: + For evicted_pods_total: + + [AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 - Jul 29 15:53:03.219: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 11:56:08.072: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-4272" for this suite. 07/29/23 15:53:03.358 - STEP: Destroying namespace "webhook-4272-markers" for this suite. 07/29/23 15:53:03.38 + STEP: Destroying namespace "gc-8741" for this suite. 
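For orientation, this spec relies on ownerReferences: the Deployment controller stamps each ReplicaSet it creates with an ownerReference back to the Deployment, and once the owner is deleted the garbage collector removes the dependents, which is why the log briefly observes 1 rs / 2 pods before they are collected. A minimal sketch of that linkage (hypothetical names and a placeholder uid; not objects from this run):

apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: gc-demo-6d9c8f5b7                        # hypothetical; the controller derives the real name/hash
  ownerReferences:
  - apiVersion: apps/v1
    kind: Deployment
    name: gc-demo                                # the owner whose deletion triggers collection
    uid: 00000000-0000-0000-0000-000000000000    # placeholder; the API server sets the real UID
    controller: true
    blockOwnerDeletion: true
spec:
  replicas: 2
  selector:
    matchLabels:
      app: gc-demo
  template:
    metadata:
      labels:
        app: gc-demo
    spec:
      containers:
      - name: pause
        image: registry.k8s.io/e2e-test-images/agnhost:2.43
        args: ["pause"]

Deleting the owner with kubectl delete deployment gc-demo --cascade=background (the default) removes the ReplicaSet and its Pods asynchronously; --cascade=foreground instead blocks the owner's deletion until the dependents are gone.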
08/24/23 11:56:08.083 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] ConfigMap - should run through a ConfigMap lifecycle [Conformance] - test/e2e/common/node/configmap.go:169 -[BeforeEach] [sig-node] ConfigMap +[sig-node] Variable Expansion + should succeed in writing subpaths in container [Slow] [Conformance] + test/e2e/common/node/expansion.go:297 +[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:53:03.417 -Jul 29 15:53:03.419: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 15:53:03.424 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:03.455 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:03.459 -[BeforeEach] [sig-node] ConfigMap +STEP: Creating a kubernetes client 08/24/23 11:56:08.106 +Aug 24 11:56:08.106: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename var-expansion 08/24/23 11:56:08.108 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:56:08.139 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:56:08.145 +[BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 -[It] should run through a ConfigMap lifecycle [Conformance] - test/e2e/common/node/configmap.go:169 -STEP: creating a ConfigMap 07/29/23 15:53:03.465 -STEP: fetching the ConfigMap 07/29/23 15:53:03.476 -STEP: patching the ConfigMap 07/29/23 15:53:03.482 -STEP: listing all ConfigMaps in all namespaces with a label selector 07/29/23 15:53:03.489 -STEP: deleting the ConfigMap by collection with a label selector 07/29/23 15:53:03.496 -STEP: listing all ConfigMaps in test namespace 07/29/23 15:53:03.51 -[AfterEach] [sig-node] ConfigMap +[It] should succeed in writing subpaths in container [Slow] [Conformance] + test/e2e/common/node/expansion.go:297 +STEP: creating the pod 08/24/23 11:56:08.151 +STEP: waiting for pod running 08/24/23 11:56:08.181 +Aug 24 11:56:08.181: INFO: Waiting up to 2m0s for pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" in namespace "var-expansion-9608" to be "running" +Aug 24 11:56:08.193: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8": Phase="Pending", Reason="", readiness=false. Elapsed: 11.847109ms +Aug 24 11:56:10.200: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.018598807s +Aug 24 11:56:10.200: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" satisfied condition "running" +STEP: creating a file in subpath 08/24/23 11:56:10.2 +Aug 24 11:56:10.205: INFO: ExecWithOptions {Command:[/bin/sh -c touch /volume_mount/mypath/foo/test.log] Namespace:var-expansion-9608 PodName:var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8 ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:56:10.205: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:56:10.208: INFO: ExecWithOptions: Clientset creation +Aug 24 11:56:10.208: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/var-expansion-9608/pods/var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8/exec?command=%2Fbin%2Fsh&command=-c&command=touch+%2Fvolume_mount%2Fmypath%2Ffoo%2Ftest.log&container=dapi-container&container=dapi-container&stderr=true&stdout=true) +STEP: test for file in mounted path 08/24/23 11:56:10.316 +Aug 24 11:56:10.322: INFO: ExecWithOptions {Command:[/bin/sh -c test -f /subpath_mount/test.log] Namespace:var-expansion-9608 PodName:var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8 ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 11:56:10.323: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 11:56:10.324: INFO: ExecWithOptions: Clientset creation +Aug 24 11:56:10.324: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/var-expansion-9608/pods/var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8/exec?command=%2Fbin%2Fsh&command=-c&command=test+-f+%2Fsubpath_mount%2Ftest.log&container=dapi-container&container=dapi-container&stderr=true&stdout=true) +STEP: updating the annotation value 08/24/23 11:56:10.44 +Aug 24 11:56:10.962: INFO: Successfully updated pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" +STEP: waiting for annotated pod running 08/24/23 11:56:10.962 +Aug 24 11:56:10.962: INFO: Waiting up to 2m0s for pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" in namespace "var-expansion-9608" to be "running" +Aug 24 11:56:10.970: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8": Phase="Running", Reason="", readiness=true. Elapsed: 7.314786ms +Aug 24 11:56:10.970: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" satisfied condition "running" +STEP: deleting the pod gracefully 08/24/23 11:56:10.97 +Aug 24 11:56:10.970: INFO: Deleting pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" in namespace "var-expansion-9608" +Aug 24 11:56:10.994: INFO: Wait up to 5m0s for pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" to be fully deleted +[AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 -Jul 29 15:53:03.515: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] ConfigMap +Aug 24 11:56:45.012: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] ConfigMap +[DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] ConfigMap +[DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-2099" for this suite. 07/29/23 15:53:03.523 +STEP: Destroying namespace "var-expansion-9608" for this suite. 
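The spec above exercises subPathExpr, which expands $(VAR) references from the container's environment, here sourced from a pod annotation through the downward API, so a file written under the expanded path inside the volume shows up at the subpath mount. A minimal sketch of that wiring (hypothetical names; not the pod from this run):

apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo                  # hypothetical
  annotations:
    mysubpath: mypath/foo
spec:
  restartPolicy: Never
  containers:
  - name: dapi-container
    image: registry.k8s.io/e2e-test-images/busybox:1.29-4   # assumption: any image with /bin/sh works
    command: ["sh", "-c", "sleep 600"]
    env:
    - name: POD_SUBPATH
      valueFrom:
        fieldRef:
          fieldPath: metadata.annotations['mysubpath']
    volumeMounts:
    - name: workdir
      mountPath: /volume_mount        # the whole volume
    - name: workdir
      mountPath: /subpath_mount       # only <volume>/mypath/foo, via expansion
      subPathExpr: $(POD_SUBPATH)
  volumes:
  - name: workdir
    emptyDir: {}

With this layout, touch /volume_mount/mypath/foo/test.log inside the container makes test -f /subpath_mount/test.log succeed, which is the check the exec calls above perform.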
08/24/23 11:56:45.024 ------------------------------ -• [0.117 seconds] -[sig-node] ConfigMap +• [SLOW TEST] [36.931 seconds] +[sig-node] Variable Expansion test/e2e/common/node/framework.go:23 - should run through a ConfigMap lifecycle [Conformance] - test/e2e/common/node/configmap.go:169 + should succeed in writing subpaths in container [Slow] [Conformance] + test/e2e/common/node/expansion.go:297 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] ConfigMap + [BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:53:03.417 - Jul 29 15:53:03.419: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 15:53:03.424 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:03.455 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:03.459 - [BeforeEach] [sig-node] ConfigMap + STEP: Creating a kubernetes client 08/24/23 11:56:08.106 + Aug 24 11:56:08.106: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename var-expansion 08/24/23 11:56:08.108 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:56:08.139 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:56:08.145 + [BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 - [It] should run through a ConfigMap lifecycle [Conformance] - test/e2e/common/node/configmap.go:169 - STEP: creating a ConfigMap 07/29/23 15:53:03.465 - STEP: fetching the ConfigMap 07/29/23 15:53:03.476 - STEP: patching the ConfigMap 07/29/23 15:53:03.482 - STEP: listing all ConfigMaps in all namespaces with a label selector 07/29/23 15:53:03.489 - STEP: deleting the ConfigMap by collection with a label selector 07/29/23 15:53:03.496 - STEP: listing all ConfigMaps in test namespace 07/29/23 15:53:03.51 - [AfterEach] [sig-node] ConfigMap + [It] should succeed in writing subpaths in container [Slow] [Conformance] + test/e2e/common/node/expansion.go:297 + STEP: creating the pod 08/24/23 11:56:08.151 + STEP: waiting for pod running 08/24/23 11:56:08.181 + Aug 24 11:56:08.181: INFO: Waiting up to 2m0s for pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" in namespace "var-expansion-9608" to be "running" + Aug 24 11:56:08.193: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8": Phase="Pending", Reason="", readiness=false. Elapsed: 11.847109ms + Aug 24 11:56:10.200: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.018598807s + Aug 24 11:56:10.200: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" satisfied condition "running" + STEP: creating a file in subpath 08/24/23 11:56:10.2 + Aug 24 11:56:10.205: INFO: ExecWithOptions {Command:[/bin/sh -c touch /volume_mount/mypath/foo/test.log] Namespace:var-expansion-9608 PodName:var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8 ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:56:10.205: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:56:10.208: INFO: ExecWithOptions: Clientset creation + Aug 24 11:56:10.208: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/var-expansion-9608/pods/var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8/exec?command=%2Fbin%2Fsh&command=-c&command=touch+%2Fvolume_mount%2Fmypath%2Ffoo%2Ftest.log&container=dapi-container&container=dapi-container&stderr=true&stdout=true) + STEP: test for file in mounted path 08/24/23 11:56:10.316 + Aug 24 11:56:10.322: INFO: ExecWithOptions {Command:[/bin/sh -c test -f /subpath_mount/test.log] Namespace:var-expansion-9608 PodName:var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8 ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 11:56:10.323: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 11:56:10.324: INFO: ExecWithOptions: Clientset creation + Aug 24 11:56:10.324: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/var-expansion-9608/pods/var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8/exec?command=%2Fbin%2Fsh&command=-c&command=test+-f+%2Fsubpath_mount%2Ftest.log&container=dapi-container&container=dapi-container&stderr=true&stdout=true) + STEP: updating the annotation value 08/24/23 11:56:10.44 + Aug 24 11:56:10.962: INFO: Successfully updated pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" + STEP: waiting for annotated pod running 08/24/23 11:56:10.962 + Aug 24 11:56:10.962: INFO: Waiting up to 2m0s for pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" in namespace "var-expansion-9608" to be "running" + Aug 24 11:56:10.970: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8": Phase="Running", Reason="", readiness=true. Elapsed: 7.314786ms + Aug 24 11:56:10.970: INFO: Pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" satisfied condition "running" + STEP: deleting the pod gracefully 08/24/23 11:56:10.97 + Aug 24 11:56:10.970: INFO: Deleting pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" in namespace "var-expansion-9608" + Aug 24 11:56:10.994: INFO: Wait up to 5m0s for pod "var-expansion-1b471ee9-a874-4acd-bcbf-67f3086b66b8" to be fully deleted + [AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 - Jul 29 15:53:03.515: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] ConfigMap + Aug 24 11:56:45.012: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] ConfigMap + [DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] ConfigMap + [DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-2099" for this suite. 
07/29/23 15:53:03.523 + STEP: Destroying namespace "var-expansion-9608" for this suite. 08/24/23 11:56:45.024 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should delete a collection of services [Conformance] - test/e2e/network/service.go:3654 -[BeforeEach] [sig-network] Services +[sig-apps] Job + should manage the lifecycle of a job [Conformance] + test/e2e/apps/job.go:703 +[BeforeEach] [sig-apps] Job set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:53:03.535 -Jul 29 15:53:03.535: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 15:53:03.538 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:03.558 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:03.561 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 11:56:45.048 +Aug 24 11:56:45.049: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename job 08/24/23 11:56:45.053 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:56:45.092 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:56:45.097 +[BeforeEach] [sig-apps] Job test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should delete a collection of services [Conformance] - test/e2e/network/service.go:3654 -STEP: creating a collection of services 07/29/23 15:53:03.565 -Jul 29 15:53:03.565: INFO: Creating e2e-svc-a-cplsq -Jul 29 15:53:03.581: INFO: Creating e2e-svc-b-vckmn -Jul 29 15:53:03.599: INFO: Creating e2e-svc-c-7gbs5 -STEP: deleting service collection 07/29/23 15:53:03.64 -Jul 29 15:53:03.774: INFO: Collection of services has been deleted -[AfterEach] [sig-network] Services +[It] should manage the lifecycle of a job [Conformance] + test/e2e/apps/job.go:703 +STEP: Creating a suspended job 08/24/23 11:56:45.106 +STEP: Patching the Job 08/24/23 11:56:45.122 +STEP: Watching for Job to be patched 08/24/23 11:56:45.15 +Aug 24 11:56:45.153: INFO: Event ADDED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6] and annotations: map[batch.kubernetes.io/job-tracking:] +Aug 24 11:56:45.153: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6] and annotations: map[batch.kubernetes.io/job-tracking:] +Aug 24 11:56:45.153: INFO: Event MODIFIED found for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking:] +STEP: Updating the job 08/24/23 11:56:45.154 +STEP: Watching for Job to be updated 08/24/23 11:56:45.169 +Aug 24 11:56:45.172: INFO: Event MODIFIED found for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] +Aug 24 11:56:45.172: INFO: Found Job annotations: map[string]string{"batch.kubernetes.io/job-tracking":"", "updated":"true"} +STEP: Listing all Jobs with LabelSelector 08/24/23 11:56:45.172 +Aug 24 11:56:45.177: INFO: Job: e2e-wknj6 as labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] +STEP: Waiting for job to complete 08/24/23 11:56:45.177 +STEP: Delete a job collection with a 
labelselector 08/24/23 11:56:57.184 +STEP: Watching for Job to be deleted 08/24/23 11:56:57.202 +Aug 24 11:56:57.206: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] +Aug 24 11:56:57.207: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] +Aug 24 11:56:57.207: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] +Aug 24 11:56:57.207: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] +Aug 24 11:56:57.208: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] +Aug 24 11:56:57.208: INFO: Event DELETED found for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] +STEP: Relist jobs to confirm deletion 08/24/23 11:56:57.208 +[AfterEach] [sig-apps] Job test/e2e/framework/node/init/init.go:32 -Jul 29 15:53:03.774: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 11:56:57.215: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Job test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-apps] Job dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-apps] Job tear down framework | framework.go:193 -STEP: Destroying namespace "services-4247" for this suite. 07/29/23 15:53:03.791 +STEP: Destroying namespace "job-7777" for this suite. 
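For reference, a Job created suspended, like the one this spec starts with, looks like the sketch below (hypothetical names; the run above used a generated name). With spec.suspend: true the controller creates no Pods until the field is flipped:

apiVersion: batch/v1
kind: Job
metadata:
  name: job-demo                  # hypothetical
  labels:
    e2e-job-label: job-demo
spec:
  suspend: true                   # no Pods are created while suspended
  backoffLimit: 6
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: c
        image: registry.k8s.io/e2e-test-images/busybox:1.29-4   # assumption: any short-lived command works
        command: ["sh", "-c", "sleep 5"]

Resuming is a merge patch of the same field, e.g. kubectl patch job job-demo --type=merge -p '{"spec":{"suspend":false}}'; collection deletion by label, as in the log, is kubectl delete jobs -l e2e-job-label=job-demo.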
08/24/23 11:56:57.223 ------------------------------ -• [0.271 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should delete a collection of services [Conformance] - test/e2e/network/service.go:3654 +• [SLOW TEST] [12.188 seconds] +[sig-apps] Job +test/e2e/apps/framework.go:23 + should manage the lifecycle of a job [Conformance] + test/e2e/apps/job.go:703 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-apps] Job set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:53:03.535 - Jul 29 15:53:03.535: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 15:53:03.538 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:03.558 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:03.561 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 11:56:45.048 + Aug 24 11:56:45.049: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename job 08/24/23 11:56:45.053 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:56:45.092 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:56:45.097 + [BeforeEach] [sig-apps] Job test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should delete a collection of services [Conformance] - test/e2e/network/service.go:3654 - STEP: creating a collection of services 07/29/23 15:53:03.565 - Jul 29 15:53:03.565: INFO: Creating e2e-svc-a-cplsq - Jul 29 15:53:03.581: INFO: Creating e2e-svc-b-vckmn - Jul 29 15:53:03.599: INFO: Creating e2e-svc-c-7gbs5 - STEP: deleting service collection 07/29/23 15:53:03.64 - Jul 29 15:53:03.774: INFO: Collection of services has been deleted - [AfterEach] [sig-network] Services + [It] should manage the lifecycle of a job [Conformance] + test/e2e/apps/job.go:703 + STEP: Creating a suspended job 08/24/23 11:56:45.106 + STEP: Patching the Job 08/24/23 11:56:45.122 + STEP: Watching for Job to be patched 08/24/23 11:56:45.15 + Aug 24 11:56:45.153: INFO: Event ADDED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6] and annotations: map[batch.kubernetes.io/job-tracking:] + Aug 24 11:56:45.153: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6] and annotations: map[batch.kubernetes.io/job-tracking:] + Aug 24 11:56:45.153: INFO: Event MODIFIED found for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking:] + STEP: Updating the job 08/24/23 11:56:45.154 + STEP: Watching for Job to be updated 08/24/23 11:56:45.169 + Aug 24 11:56:45.172: INFO: Event MODIFIED found for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] + Aug 24 11:56:45.172: INFO: Found Job annotations: map[string]string{"batch.kubernetes.io/job-tracking":"", "updated":"true"} + STEP: Listing all Jobs with LabelSelector 08/24/23 11:56:45.172 + Aug 24 11:56:45.177: INFO: Job: e2e-wknj6 as labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] + STEP: Waiting for job to complete 08/24/23 11:56:45.177 + STEP: Delete a job collection with a 
labelselector 08/24/23 11:56:57.184 + STEP: Watching for Job to be deleted 08/24/23 11:56:57.202 + Aug 24 11:56:57.206: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] + Aug 24 11:56:57.207: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] + Aug 24 11:56:57.207: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] + Aug 24 11:56:57.207: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] + Aug 24 11:56:57.208: INFO: Event MODIFIED observed for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] + Aug 24 11:56:57.208: INFO: Event DELETED found for Job e2e-wknj6 in namespace job-7777 with labels: map[e2e-job-label:e2e-wknj6 e2e-wknj6:patched] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] + STEP: Relist jobs to confirm deletion 08/24/23 11:56:57.208 + [AfterEach] [sig-apps] Job test/e2e/framework/node/init/init.go:32 - Jul 29 15:53:03.774: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 11:56:57.215: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Job test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-apps] Job dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-apps] Job tear down framework | framework.go:193 - STEP: Destroying namespace "services-4247" for this suite. 07/29/23 15:53:03.791 + STEP: Destroying namespace "job-7777" for this suite. 
08/24/23 11:56:57.223 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - removes definition from spec when one version gets changed to not be served [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:442 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[sig-node] Probing container + should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:184 +[BeforeEach] [sig-node] Probing container set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:53:03.809 -Jul 29 15:53:03.809: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 15:53:03.812 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:03.839 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:03.85 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 11:56:57.258 +Aug 24 11:56:57.258: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-probe 08/24/23 11:56:57.26 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:56:57.326 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:56:57.333 +[BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 -[It] removes definition from spec when one version gets changed to not be served [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:442 -STEP: set up a multi version CRD 07/29/23 15:53:03.855 -Jul 29 15:53:03.857: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: mark a version not serverd 07/29/23 15:53:09.808 -STEP: check the unserved version gets removed 07/29/23 15:53:09.846 -STEP: check the other version is not changed 07/29/23 15:53:12.4 -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 +[It] should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:184 +STEP: Creating pod liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4 in namespace container-probe-4704 08/24/23 11:56:57.338 +Aug 24 11:56:57.352: INFO: Waiting up to 5m0s for pod "liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4" in namespace "container-probe-4704" to be "not pending" +Aug 24 11:56:57.361: INFO: Pod "liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4": Phase="Pending", Reason="", readiness=false. Elapsed: 9.134284ms +Aug 24 11:56:59.370: INFO: Pod "liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.018339203s +Aug 24 11:56:59.370: INFO: Pod "liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4" satisfied condition "not pending" +Aug 24 11:56:59.370: INFO: Started pod liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4 in namespace container-probe-4704 +STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 11:56:59.37 +Aug 24 11:56:59.376: INFO: Initial restart count of pod liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4 is 0 +STEP: deleting the pod 08/24/23 12:01:00.53 +[AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 -Jul 29 15:53:16.653: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +Aug 24 12:01:00.559: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 -STEP: Destroying namespace "crd-publish-openapi-9397" for this suite. 07/29/23 15:53:16.68 +STEP: Destroying namespace "container-probe-4704" for this suite. 08/24/23 12:01:00.569 ------------------------------ -• [SLOW TEST] [12.888 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - removes definition from spec when one version gets changed to not be served [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:442 +• [SLOW TEST] [243.346 seconds] +[sig-node] Probing container +test/e2e/common/node/framework.go:23 + should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:184 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Probing container set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:53:03.809 - Jul 29 15:53:03.809: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 15:53:03.812 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:03.839 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:03.85 - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 11:56:57.258 + Aug 24 11:56:57.258: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-probe 08/24/23 11:56:57.26 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 11:56:57.326 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 11:56:57.333 + [BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 - [It] removes definition from spec when one version gets changed to not be served [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:442 - STEP: set up a multi version CRD 07/29/23 15:53:03.855 - Jul 29 15:53:03.857: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: 
mark a version not serverd 07/29/23 15:53:09.808 - STEP: check the unserved version gets removed 07/29/23 15:53:09.846 - STEP: check the other version is not changed 07/29/23 15:53:12.4 - [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 + [It] should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:184 + STEP: Creating pod liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4 in namespace container-probe-4704 08/24/23 11:56:57.338 + Aug 24 11:56:57.352: INFO: Waiting up to 5m0s for pod "liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4" in namespace "container-probe-4704" to be "not pending" + Aug 24 11:56:57.361: INFO: Pod "liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4": Phase="Pending", Reason="", readiness=false. Elapsed: 9.134284ms + Aug 24 11:56:59.370: INFO: Pod "liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4": Phase="Running", Reason="", readiness=true. Elapsed: 2.018339203s + Aug 24 11:56:59.370: INFO: Pod "liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4" satisfied condition "not pending" + Aug 24 11:56:59.370: INFO: Started pod liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4 in namespace container-probe-4704 + STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 11:56:59.37 + Aug 24 11:56:59.376: INFO: Initial restart count of pod liveness-4bcf1f9d-8bc6-466a-920f-0ee72d0e22a4 is 0 + STEP: deleting the pod 08/24/23 12:01:00.53 + [AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 - Jul 29 15:53:16.653: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + Aug 24 12:01:00.559: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 - STEP: Destroying namespace "crd-publish-openapi-9397" for this suite. 07/29/23 15:53:16.68 + STEP: Destroying namespace "container-probe-4704" for this suite. 
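The probe shape this spec verifies (a tcpSocket check that keeps succeeding, so restartCount stays 0 over the observation window) is sketched below with hypothetical names; agnhost's netexec server listens on 8080, matching the tcp:8080 probe:

apiVersion: v1
kind: Pod
metadata:
  name: liveness-tcp-demo          # hypothetical
spec:
  containers:
  - name: agnhost
    image: registry.k8s.io/e2e-test-images/agnhost:2.43
    args: ["netexec", "--http-port=8080"]
    ports:
    - containerPort: 8080
    livenessProbe:
      tcpSocket:
        port: 8080                 # succeeds while the port accepts TCP connections
      initialDelaySeconds: 15
      periodSeconds: 10
      failureThreshold: 3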
08/24/23 12:01:00.569 << End Captured GinkgoWriter Output ------------------------------ +SSSSSSSSSSSSSS +------------------------------ [sig-node] RuntimeClass should reject a Pod requesting a non-existent RuntimeClass [NodeConformance] [Conformance] test/e2e/common/node/runtimeclass.go:55 [BeforeEach] [sig-node] RuntimeClass set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:53:16.699 -Jul 29 15:53:16.699: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename runtimeclass 07/29/23 15:53:16.701 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:16.74 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:16.745 +STEP: Creating a kubernetes client 08/24/23 12:01:00.609 +Aug 24 12:01:00.610: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename runtimeclass 08/24/23 12:01:00.614 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:00.654 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:00.659 [BeforeEach] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:31 [It] should reject a Pod requesting a non-existent RuntimeClass [NodeConformance] [Conformance] test/e2e/common/node/runtimeclass.go:55 [AfterEach] [sig-node] RuntimeClass test/e2e/framework/node/init/init.go:32 -Jul 29 15:53:16.772: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:01:00.679: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] RuntimeClass dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-node] RuntimeClass tear down framework | framework.go:193 -STEP: Destroying namespace "runtimeclass-1962" for this suite. 07/29/23 15:53:16.779 +STEP: Destroying namespace "runtimeclass-5040" for this suite. 
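What this spec asserts: a Pod naming a RuntimeClass that was never created cannot run; depending on which admission plugins are enabled, creation is rejected outright or the Pod is left failed by the kubelet. A minimal sketch (hypothetical names):

apiVersion: v1
kind: Pod
metadata:
  name: runtimeclass-demo              # hypothetical
spec:
  runtimeClassName: no-such-runtime    # no RuntimeClass object with this name exists
  containers:
  - name: pause
    image: registry.k8s.io/e2e-test-images/agnhost:2.43
    args: ["pause"]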
08/24/23 12:01:00.689 ------------------------------ -• [0.102 seconds] +• [0.091 seconds] [sig-node] RuntimeClass test/e2e/common/node/framework.go:23 should reject a Pod requesting a non-existent RuntimeClass [NodeConformance] [Conformance] @@ -5652,96 +5801,431 @@ test/e2e/common/node/framework.go:23 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-node] RuntimeClass set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:53:16.699 - Jul 29 15:53:16.699: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename runtimeclass 07/29/23 15:53:16.701 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:16.74 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:16.745 + STEP: Creating a kubernetes client 08/24/23 12:01:00.609 + Aug 24 12:01:00.610: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename runtimeclass 08/24/23 12:01:00.614 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:00.654 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:00.659 [BeforeEach] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:31 [It] should reject a Pod requesting a non-existent RuntimeClass [NodeConformance] [Conformance] test/e2e/common/node/runtimeclass.go:55 [AfterEach] [sig-node] RuntimeClass test/e2e/framework/node/init/init.go:32 - Jul 29 15:53:16.772: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:01:00.679: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] RuntimeClass + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-node] RuntimeClass + dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-node] RuntimeClass + tear down framework | framework.go:193 + STEP: Destroying namespace "runtimeclass-5040" for this suite. 
08/24/23 12:01:00.689 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate configmap [Conformance] + test/e2e/apimachinery/webhook.go:252 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:01:00.708 +Aug 24 12:01:00.708: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 12:01:00.71 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:00.738 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:00.743 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 12:01:00.774 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:01:02.292 +STEP: Deploying the webhook pod 08/24/23 12:01:02.308 +STEP: Wait for the deployment to be ready 08/24/23 12:01:02.331 +Aug 24 12:01:02.349: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service 08/24/23 12:01:04.374 +STEP: Verifying the service has paired with the endpoint 08/24/23 12:01:04.407 +Aug 24 12:01:05.408: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate configmap [Conformance] + test/e2e/apimachinery/webhook.go:252 +STEP: Registering the mutating configmap webhook via the AdmissionRegistration API 08/24/23 12:01:05.416 +STEP: create a configmap that should be updated by the webhook 08/24/23 12:01:05.45 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/node/init/init.go:32 +Aug 24 12:01:05.484: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + tear down framework | framework.go:193 +STEP: Destroying namespace "webhook-1878" for this suite. 08/24/23 12:01:05.59 +STEP: Destroying namespace "webhook-1878-markers" for this suite. 
08/24/23 12:01:05.606 +------------------------------ +• [4.914 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should mutate configmap [Conformance] + test/e2e/apimachinery/webhook.go:252 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:01:00.708 + Aug 24 12:01:00.708: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 12:01:00.71 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:00.738 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:00.743 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 12:01:00.774 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:01:02.292 + STEP: Deploying the webhook pod 08/24/23 12:01:02.308 + STEP: Wait for the deployment to be ready 08/24/23 12:01:02.331 + Aug 24 12:01:02.349: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set + STEP: Deploying the webhook service 08/24/23 12:01:04.374 + STEP: Verifying the service has paired with the endpoint 08/24/23 12:01:04.407 + Aug 24 12:01:05.408: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should mutate configmap [Conformance] + test/e2e/apimachinery/webhook.go:252 + STEP: Registering the mutating configmap webhook via the AdmissionRegistration API 08/24/23 12:01:05.416 + STEP: create a configmap that should be updated by the webhook 08/24/23 12:01:05.45 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/node/init/init.go:32 + Aug 24 12:01:05.484: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "runtimeclass-1962" for this suite. 07/29/23 15:53:16.779 + STEP: Destroying namespace "webhook-1878" for this suite. 08/24/23 12:01:05.59 + STEP: Destroying namespace "webhook-1878-markers" for this suite. 08/24/23 12:01:05.606 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSS +SSS ------------------------------ -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a configMap. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:326 -[BeforeEach] [sig-api-machinery] ResourceQuota +[sig-cli] Kubectl client Update Demo + should scale a replication controller [Conformance] + test/e2e/kubectl/kubectl.go:352 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:53:16.803 -Jul 29 15:53:16.803: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 15:53:16.806 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:16.866 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:16.875 -[BeforeEach] [sig-api-machinery] ResourceQuota +STEP: Creating a kubernetes client 08/24/23 12:01:05.625 +Aug 24 12:01:05.626: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:01:05.633 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:05.708 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:05.717 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[It] should create a ResourceQuota and capture the life of a configMap. [Conformance] - test/e2e/apimachinery/resource_quota.go:326 -STEP: Counting existing ResourceQuota 07/29/23 15:53:33.893 -STEP: Creating a ResourceQuota 07/29/23 15:53:38.901 -STEP: Ensuring resource quota status is calculated 07/29/23 15:53:38.917 -STEP: Creating a ConfigMap 07/29/23 15:53:40.933 -STEP: Ensuring resource quota status captures configMap creation 07/29/23 15:53:40.953 -STEP: Deleting a ConfigMap 07/29/23 15:53:42.961 -STEP: Ensuring resource quota status released usage 07/29/23 15:53:42.973 -[AfterEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[BeforeEach] Update Demo + test/e2e/kubectl/kubectl.go:326 +[It] should scale a replication controller [Conformance] + test/e2e/kubectl/kubectl.go:352 +STEP: creating a replication controller 08/24/23 12:01:05.729 +Aug 24 12:01:05.730: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 create -f -' +Aug 24 12:01:07.849: INFO: stderr: "" +Aug 24 12:01:07.849: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. 08/24/23 12:01:07.849 +Aug 24 12:01:07.849: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Aug 24 12:01:08.069: INFO: stderr: "" +Aug 24 12:01:08.069: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " +Aug 24 12:01:08.069: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:01:08.217: INFO: stderr: "" +Aug 24 12:01:08.217: INFO: stdout: "" +Aug 24 12:01:08.217: INFO: update-demo-nautilus-gvjxq is created but not running +Aug 24 12:01:13.218: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Aug 24 12:01:13.390: INFO: stderr: "" +Aug 24 12:01:13.390: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " +Aug 24 12:01:13.390: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:01:13.578: INFO: stderr: "" +Aug 24 12:01:13.578: INFO: stdout: "" +Aug 24 12:01:13.578: INFO: update-demo-nautilus-gvjxq is created but not running +Aug 24 12:01:18.580: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Aug 24 12:01:18.756: INFO: stderr: "" +Aug 24 12:01:18.756: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " +Aug 24 12:01:18.756: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:01:18.917: INFO: stderr: "" +Aug 24 12:01:18.917: INFO: stdout: "" +Aug 24 12:01:18.917: INFO: update-demo-nautilus-gvjxq is created but not running +Aug 24 12:01:23.919: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Aug 24 12:01:24.071: INFO: stderr: "" +Aug 24 12:01:24.071: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " +Aug 24 12:01:24.071: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:01:24.201: INFO: stderr: "" +Aug 24 12:01:24.201: INFO: stdout: "true" +Aug 24 12:01:24.202: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Aug 24 12:01:24.335: INFO: stderr: "" +Aug 24 12:01:24.335: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" +Aug 24 12:01:24.335: INFO: validating pod update-demo-nautilus-gvjxq +Aug 24 12:01:24.353: INFO: got data: { + "image": "nautilus.jpg" +} + +Aug 24 12:01:24.354: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . 
+Aug 24 12:01:24.354: INFO: update-demo-nautilus-gvjxq is verified up and running +Aug 24 12:01:24.354: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:01:24.503: INFO: stderr: "" +Aug 24 12:01:24.503: INFO: stdout: "true" +Aug 24 12:01:24.503: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Aug 24 12:01:24.646: INFO: stderr: "" +Aug 24 12:01:24.646: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" +Aug 24 12:01:24.646: INFO: validating pod update-demo-nautilus-krfsl +Aug 24 12:01:24.673: INFO: got data: { + "image": "nautilus.jpg" +} + +Aug 24 12:01:24.673: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Aug 24 12:01:24.673: INFO: update-demo-nautilus-krfsl is verified up and running +STEP: scaling down the replication controller 08/24/23 12:01:24.673 +Aug 24 12:01:24.691: INFO: scanned /root for discovery docs: +Aug 24 12:01:24.691: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 scale rc update-demo-nautilus --replicas=1 --timeout=5m' +Aug 24 12:01:24.878: INFO: stderr: "" +Aug 24 12:01:24.878: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" +STEP: waiting for all containers in name=update-demo pods to come up. 08/24/23 12:01:24.878 +Aug 24 12:01:24.882: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Aug 24 12:01:25.072: INFO: stderr: "" +Aug 24 12:01:25.072: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " +STEP: Replicas for name=update-demo: expected=1 actual=2 08/24/23 12:01:25.072 +Aug 24 12:01:30.073: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Aug 24 12:01:30.212: INFO: stderr: "" +Aug 24 12:01:30.212: INFO: stdout: "update-demo-nautilus-krfsl " +Aug 24 12:01:30.213: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:01:30.340: INFO: stderr: "" +Aug 24 12:01:30.340: INFO: stdout: "true" +Aug 24 12:01:30.340: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Aug 24 12:01:30.469: INFO: stderr: "" +Aug 24 12:01:30.469: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" +Aug 24 12:01:30.469: INFO: validating pod update-demo-nautilus-krfsl +Aug 24 12:01:30.483: INFO: got data: { + "image": "nautilus.jpg" +} + +Aug 24 12:01:30.483: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Aug 24 12:01:30.483: INFO: update-demo-nautilus-krfsl is verified up and running +STEP: scaling up the replication controller 08/24/23 12:01:30.483 +Aug 24 12:01:30.492: INFO: scanned /root for discovery docs: +Aug 24 12:01:30.493: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 scale rc update-demo-nautilus --replicas=2 --timeout=5m' +Aug 24 12:01:31.687: INFO: stderr: "" +Aug 24 12:01:31.687: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" +STEP: waiting for all containers in name=update-demo pods to come up. 08/24/23 12:01:31.687 +Aug 24 12:01:31.688: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Aug 24 12:01:31.829: INFO: stderr: "" +Aug 24 12:01:31.829: INFO: stdout: "update-demo-nautilus-krfsl update-demo-nautilus-v6mpd " +Aug 24 12:01:31.830: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:01:31.986: INFO: stderr: "" +Aug 24 12:01:31.986: INFO: stdout: "true" +Aug 24 12:01:31.986: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Aug 24 12:01:32.149: INFO: stderr: "" +Aug 24 12:01:32.149: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" +Aug 24 12:01:32.149: INFO: validating pod update-demo-nautilus-krfsl +Aug 24 12:01:32.158: INFO: got data: { + "image": "nautilus.jpg" +} + +Aug 24 12:01:32.158: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Aug 24 12:01:32.158: INFO: update-demo-nautilus-krfsl is verified up and running +Aug 24 12:01:32.158: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-v6mpd -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:01:32.307: INFO: stderr: "" +Aug 24 12:01:32.307: INFO: stdout: "true" +Aug 24 12:01:32.308: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-v6mpd -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Aug 24 12:01:32.480: INFO: stderr: "" +Aug 24 12:01:32.480: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" +Aug 24 12:01:32.480: INFO: validating pod update-demo-nautilus-v6mpd +Aug 24 12:01:32.503: INFO: got data: { + "image": "nautilus.jpg" +} + +Aug 24 12:01:32.503: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Aug 24 12:01:32.503: INFO: update-demo-nautilus-v6mpd is verified up and running +STEP: using delete to clean up resources 08/24/23 12:01:32.504 +Aug 24 12:01:32.504: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 delete --grace-period=0 --force -f -' +Aug 24 12:01:32.631: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Aug 24 12:01:32.631: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +Aug 24 12:01:32.632: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get rc,svc -l name=update-demo --no-headers' +Aug 24 12:01:32.811: INFO: stderr: "No resources found in kubectl-110 namespace.\n" +Aug 24 12:01:32.811: INFO: stdout: "" +Aug 24 12:01:32.811: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Aug 24 12:01:32.965: INFO: stderr: "" +Aug 24 12:01:32.965: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 15:53:44.986: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 12:01:32.965: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-8060" for this suite. 07/29/23 15:53:44.995 +STEP: Destroying namespace "kubectl-110" for this suite. 08/24/23 12:01:32.977 ------------------------------ -• [SLOW TEST] [28.204 seconds] -[sig-api-machinery] ResourceQuota -test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a configMap. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:326 +• [SLOW TEST] [27.365 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Update Demo + test/e2e/kubectl/kubectl.go:324 + should scale a replication controller [Conformance] + test/e2e/kubectl/kubectl.go:352 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:53:16.803 - Jul 29 15:53:16.803: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 15:53:16.806 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:16.866 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:16.875 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 12:01:05.625 + Aug 24 12:01:05.626: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:01:05.633 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:05.708 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:05.717 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [It] should create a ResourceQuota and capture the life of a configMap. [Conformance] - test/e2e/apimachinery/resource_quota.go:326 - STEP: Counting existing ResourceQuota 07/29/23 15:53:33.893 - STEP: Creating a ResourceQuota 07/29/23 15:53:38.901 - STEP: Ensuring resource quota status is calculated 07/29/23 15:53:38.917 - STEP: Creating a ConfigMap 07/29/23 15:53:40.933 - STEP: Ensuring resource quota status captures configMap creation 07/29/23 15:53:40.953 - STEP: Deleting a ConfigMap 07/29/23 15:53:42.961 - STEP: Ensuring resource quota status released usage 07/29/23 15:53:42.973 - [AfterEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [BeforeEach] Update Demo + test/e2e/kubectl/kubectl.go:326 + [It] should scale a replication controller [Conformance] + test/e2e/kubectl/kubectl.go:352 + STEP: creating a replication controller 08/24/23 12:01:05.729 + Aug 24 12:01:05.730: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 create -f -' + Aug 24 12:01:07.849: INFO: stderr: "" + Aug 24 12:01:07.849: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" + STEP: waiting for all containers in name=update-demo pods to come up. 08/24/23 12:01:07.849 + Aug 24 12:01:07.849: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' + Aug 24 12:01:08.069: INFO: stderr: "" + Aug 24 12:01:08.069: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " + Aug 24 12:01:08.069: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:01:08.217: INFO: stderr: "" + Aug 24 12:01:08.217: INFO: stdout: "" + Aug 24 12:01:08.217: INFO: update-demo-nautilus-gvjxq is created but not running + Aug 24 12:01:13.218: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' + Aug 24 12:01:13.390: INFO: stderr: "" + Aug 24 12:01:13.390: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " + Aug 24 12:01:13.390: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:01:13.578: INFO: stderr: "" + Aug 24 12:01:13.578: INFO: stdout: "" + Aug 24 12:01:13.578: INFO: update-demo-nautilus-gvjxq is created but not running + Aug 24 12:01:18.580: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' + Aug 24 12:01:18.756: INFO: stderr: "" + Aug 24 12:01:18.756: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " + Aug 24 12:01:18.756: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:01:18.917: INFO: stderr: "" + Aug 24 12:01:18.917: INFO: stdout: "" + Aug 24 12:01:18.917: INFO: update-demo-nautilus-gvjxq is created but not running + Aug 24 12:01:23.919: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' + Aug 24 12:01:24.071: INFO: stderr: "" + Aug 24 12:01:24.071: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " + Aug 24 12:01:24.071: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:01:24.201: INFO: stderr: "" + Aug 24 12:01:24.201: INFO: stdout: "true" + Aug 24 12:01:24.202: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-gvjxq -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' + Aug 24 12:01:24.335: INFO: stderr: "" + Aug 24 12:01:24.335: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" + Aug 24 12:01:24.335: INFO: validating pod update-demo-nautilus-gvjxq + Aug 24 12:01:24.353: INFO: got data: { + "image": "nautilus.jpg" + } + + Aug 24 12:01:24.354: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . 
+ Aug 24 12:01:24.354: INFO: update-demo-nautilus-gvjxq is verified up and running + Aug 24 12:01:24.354: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:01:24.503: INFO: stderr: "" + Aug 24 12:01:24.503: INFO: stdout: "true" + Aug 24 12:01:24.503: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' + Aug 24 12:01:24.646: INFO: stderr: "" + Aug 24 12:01:24.646: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" + Aug 24 12:01:24.646: INFO: validating pod update-demo-nautilus-krfsl + Aug 24 12:01:24.673: INFO: got data: { + "image": "nautilus.jpg" + } + + Aug 24 12:01:24.673: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . + Aug 24 12:01:24.673: INFO: update-demo-nautilus-krfsl is verified up and running + STEP: scaling down the replication controller 08/24/23 12:01:24.673 + Aug 24 12:01:24.691: INFO: scanned /root for discovery docs: + Aug 24 12:01:24.691: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 scale rc update-demo-nautilus --replicas=1 --timeout=5m' + Aug 24 12:01:24.878: INFO: stderr: "" + Aug 24 12:01:24.878: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" + STEP: waiting for all containers in name=update-demo pods to come up. 08/24/23 12:01:24.878 + Aug 24 12:01:24.882: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' + Aug 24 12:01:25.072: INFO: stderr: "" + Aug 24 12:01:25.072: INFO: stdout: "update-demo-nautilus-gvjxq update-demo-nautilus-krfsl " + STEP: Replicas for name=update-demo: expected=1 actual=2 08/24/23 12:01:25.072 + Aug 24 12:01:30.073: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' + Aug 24 12:01:30.212: INFO: stderr: "" + Aug 24 12:01:30.212: INFO: stdout: "update-demo-nautilus-krfsl " + Aug 24 12:01:30.213: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:01:30.340: INFO: stderr: "" + Aug 24 12:01:30.340: INFO: stdout: "true" + Aug 24 12:01:30.340: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' + Aug 24 12:01:30.469: INFO: stderr: "" + Aug 24 12:01:30.469: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" + Aug 24 12:01:30.469: INFO: validating pod update-demo-nautilus-krfsl + Aug 24 12:01:30.483: INFO: got data: { + "image": "nautilus.jpg" + } + + Aug 24 12:01:30.483: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . + Aug 24 12:01:30.483: INFO: update-demo-nautilus-krfsl is verified up and running + STEP: scaling up the replication controller 08/24/23 12:01:30.483 + Aug 24 12:01:30.492: INFO: scanned /root for discovery docs: + Aug 24 12:01:30.493: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 scale rc update-demo-nautilus --replicas=2 --timeout=5m' + Aug 24 12:01:31.687: INFO: stderr: "" + Aug 24 12:01:31.687: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" + STEP: waiting for all containers in name=update-demo pods to come up. 08/24/23 12:01:31.687 + Aug 24 12:01:31.688: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' + Aug 24 12:01:31.829: INFO: stderr: "" + Aug 24 12:01:31.829: INFO: stdout: "update-demo-nautilus-krfsl update-demo-nautilus-v6mpd " + Aug 24 12:01:31.830: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:01:31.986: INFO: stderr: "" + Aug 24 12:01:31.986: INFO: stdout: "true" + Aug 24 12:01:31.986: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-krfsl -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' + Aug 24 12:01:32.149: INFO: stderr: "" + Aug 24 12:01:32.149: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" + Aug 24 12:01:32.149: INFO: validating pod update-demo-nautilus-krfsl + Aug 24 12:01:32.158: INFO: got data: { + "image": "nautilus.jpg" + } + + Aug 24 12:01:32.158: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . + Aug 24 12:01:32.158: INFO: update-demo-nautilus-krfsl is verified up and running + Aug 24 12:01:32.158: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-v6mpd -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:01:32.307: INFO: stderr: "" + Aug 24 12:01:32.307: INFO: stdout: "true" + Aug 24 12:01:32.308: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods update-demo-nautilus-v6mpd -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' + Aug 24 12:01:32.480: INFO: stderr: "" + Aug 24 12:01:32.480: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" + Aug 24 12:01:32.480: INFO: validating pod update-demo-nautilus-v6mpd + Aug 24 12:01:32.503: INFO: got data: { + "image": "nautilus.jpg" + } + + Aug 24 12:01:32.503: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . + Aug 24 12:01:32.503: INFO: update-demo-nautilus-v6mpd is verified up and running + STEP: using delete to clean up resources 08/24/23 12:01:32.504 + Aug 24 12:01:32.504: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 delete --grace-period=0 --force -f -' + Aug 24 12:01:32.631: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" + Aug 24 12:01:32.631: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" + Aug 24 12:01:32.632: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get rc,svc -l name=update-demo --no-headers' + Aug 24 12:01:32.811: INFO: stderr: "No resources found in kubectl-110 namespace.\n" + Aug 24 12:01:32.811: INFO: stdout: "" + Aug 24 12:01:32.811: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-110 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' + Aug 24 12:01:32.965: INFO: stderr: "" + Aug 24 12:01:32.965: INFO: stdout: "" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 15:53:44.986: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 12:01:32.965: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-8060" for this suite. 07/29/23 15:53:44.995 + STEP: Destroying namespace "kubectl-110" for this suite. 
08/24/23 12:01:32.977 << End Captured GinkgoWriter Output ------------------------------ SSS @@ -5751,38 +6235,38 @@ SSS test/e2e/common/storage/empty_dir.go:157 [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:53:45.013 -Jul 29 15:53:45.013: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 15:53:45.019 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:45.057 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:45.062 +STEP: Creating a kubernetes client 08/24/23 12:01:32.992 +Aug 24 12:01:32.992: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 12:01:32.996 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:33.031 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:33.035 [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 [It] volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] test/e2e/common/storage/empty_dir.go:157 -STEP: Creating a pod to test emptydir volume type on node default medium 07/29/23 15:53:45.066 -Jul 29 15:53:45.082: INFO: Waiting up to 5m0s for pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5" in namespace "emptydir-427" to be "Succeeded or Failed" -Jul 29 15:53:45.092: INFO: Pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5": Phase="Pending", Reason="", readiness=false. Elapsed: 9.104661ms -Jul 29 15:53:47.101: INFO: Pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018869348s -Jul 29 15:53:49.100: INFO: Pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017061378s -STEP: Saw pod success 07/29/23 15:53:49.1 -Jul 29 15:53:49.100: INFO: Pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5" satisfied condition "Succeeded or Failed" -Jul 29 15:53:49.105: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5 container test-container: -STEP: delete the pod 07/29/23 15:53:49.134 -Jul 29 15:53:49.158: INFO: Waiting for pod pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5 to disappear -Jul 29 15:53:49.164: INFO: Pod pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5 no longer exists +STEP: Creating a pod to test emptydir volume type on node default medium 08/24/23 12:01:33.042 +Aug 24 12:01:33.063: INFO: Waiting up to 5m0s for pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa" in namespace "emptydir-2210" to be "Succeeded or Failed" +Aug 24 12:01:33.075: INFO: Pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa": Phase="Pending", Reason="", readiness=false. Elapsed: 12.127414ms +Aug 24 12:01:35.091: INFO: Pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa": Phase="Running", Reason="", readiness=true. Elapsed: 2.028684277s +Aug 24 12:01:37.081: INFO: Pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018398152s +STEP: Saw pod success 08/24/23 12:01:37.081 +Aug 24 12:01:37.081: INFO: Pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa" satisfied condition "Succeeded or Failed" +Aug 24 12:01:37.087: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa container test-container: +STEP: delete the pod 08/24/23 12:01:37.119 +Aug 24 12:01:37.139: INFO: Waiting for pod pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa to disappear +Aug 24 12:01:37.149: INFO: Pod pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa no longer exists [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 15:53:49.165: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:01:37.150: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-427" for this suite. 07/29/23 15:53:49.173 +STEP: Destroying namespace "emptydir-2210" for this suite. 08/24/23 12:01:37.161 ------------------------------ -• [4.172 seconds] +• [4.180 seconds] [sig-storage] EmptyDir volumes test/e2e/common/storage/framework.go:23 volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] @@ -5791,891 +6275,421 @@ test/e2e/common/storage/framework.go:23 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:53:45.013 - Jul 29 15:53:45.013: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 15:53:45.019 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:45.057 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:45.062 + STEP: Creating a kubernetes client 08/24/23 12:01:32.992 + Aug 24 12:01:32.992: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 12:01:32.996 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:33.031 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:33.035 [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 [It] volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] test/e2e/common/storage/empty_dir.go:157 - STEP: Creating a pod to test emptydir volume type on node default medium 07/29/23 15:53:45.066 - Jul 29 15:53:45.082: INFO: Waiting up to 5m0s for pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5" in namespace "emptydir-427" to be "Succeeded or Failed" - Jul 29 15:53:45.092: INFO: Pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5": Phase="Pending", Reason="", readiness=false. Elapsed: 9.104661ms - Jul 29 15:53:47.101: INFO: Pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018869348s - Jul 29 15:53:49.100: INFO: Pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017061378s - STEP: Saw pod success 07/29/23 15:53:49.1 - Jul 29 15:53:49.100: INFO: Pod "pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5" satisfied condition "Succeeded or Failed" - Jul 29 15:53:49.105: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5 container test-container: - STEP: delete the pod 07/29/23 15:53:49.134 - Jul 29 15:53:49.158: INFO: Waiting for pod pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5 to disappear - Jul 29 15:53:49.164: INFO: Pod pod-262e92ad-9430-4ea7-b0e8-0e463150bdb5 no longer exists + STEP: Creating a pod to test emptydir volume type on node default medium 08/24/23 12:01:33.042 + Aug 24 12:01:33.063: INFO: Waiting up to 5m0s for pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa" in namespace "emptydir-2210" to be "Succeeded or Failed" + Aug 24 12:01:33.075: INFO: Pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa": Phase="Pending", Reason="", readiness=false. Elapsed: 12.127414ms + Aug 24 12:01:35.091: INFO: Pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa": Phase="Running", Reason="", readiness=true. Elapsed: 2.028684277s + Aug 24 12:01:37.081: INFO: Pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018398152s + STEP: Saw pod success 08/24/23 12:01:37.081 + Aug 24 12:01:37.081: INFO: Pod "pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa" satisfied condition "Succeeded or Failed" + Aug 24 12:01:37.087: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa container test-container: + STEP: delete the pod 08/24/23 12:01:37.119 + Aug 24 12:01:37.139: INFO: Waiting for pod pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa to disappear + Aug 24 12:01:37.149: INFO: Pod pod-cfbcdf5b-1ecf-4ed8-8df6-0019e6f3feaa no longer exists [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 15:53:49.165: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:01:37.150: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-427" for this suite. 07/29/23 15:53:49.173 + STEP: Destroying namespace "emptydir-2210" for this suite. 
08/24/23 12:01:37.161 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for CRD preserving unknown fields in an embedded object [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:236 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[sig-node] Pods Extended Pods Set QOS Class + should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] + test/e2e/node/pods.go:161 +[BeforeEach] [sig-node] Pods Extended set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:53:49.187 -Jul 29 15:53:49.187: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 15:53:49.189 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:49.217 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:49.224 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:01:37.175 +Aug 24 12:01:37.175: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 12:01:37.177 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:37.209 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:37.216 +[BeforeEach] [sig-node] Pods Extended test/e2e/framework/metrics/init/init.go:31 -[It] works for CRD preserving unknown fields in an embedded object [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:236 -Jul 29 15:53:49.230: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 07/29/23 15:53:51.804 -Jul 29 15:53:51.805: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 --namespace=crd-publish-openapi-6533 create -f -' -Jul 29 15:53:53.341: INFO: stderr: "" -Jul 29 15:53:53.341: INFO: stdout: "e2e-test-crd-publish-openapi-3924-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" -Jul 29 15:53:53.341: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 --namespace=crd-publish-openapi-6533 delete e2e-test-crd-publish-openapi-3924-crds test-cr' -Jul 29 15:53:53.630: INFO: stderr: "" -Jul 29 15:53:53.630: INFO: stdout: "e2e-test-crd-publish-openapi-3924-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" -Jul 29 15:53:53.630: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 --namespace=crd-publish-openapi-6533 apply -f -' -Jul 29 15:53:54.084: INFO: stderr: "" -Jul 29 15:53:54.084: INFO: stdout: "e2e-test-crd-publish-openapi-3924-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" -Jul 29 15:53:54.084: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 --namespace=crd-publish-openapi-6533 delete e2e-test-crd-publish-openapi-3924-crds test-cr' -Jul 29 15:53:54.262: INFO: stderr: "" -Jul 29 15:53:54.262: INFO: stdout: "e2e-test-crd-publish-openapi-3924-crd.crd-publish-openapi-test-unknown-in-nested.example.com 
\"test-cr\" deleted\n" -STEP: kubectl explain works to explain CR 07/29/23 15:53:54.262 -Jul 29 15:53:54.262: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 explain e2e-test-crd-publish-openapi-3924-crds' -Jul 29 15:53:55.472: INFO: stderr: "" -Jul 29 15:53:55.472: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-3924-crd\nVERSION: crd-publish-openapi-test-unknown-in-nested.example.com/v1\n\nDESCRIPTION:\n preserve-unknown-properties in nested field for Testing\n\nFIELDS:\n apiVersion\t\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<>\n Specification of Waldo\n\n status\t\n Status of Waldo\n\n" -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] Pods Set QOS Class + test/e2e/node/pods.go:152 +[It] should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] + test/e2e/node/pods.go:161 +STEP: creating the pod 08/24/23 12:01:37.223 +STEP: submitting the pod to kubernetes 08/24/23 12:01:37.223 +STEP: verifying QOS class is set on the pod 08/24/23 12:01:37.237 +[AfterEach] [sig-node] Pods Extended test/e2e/framework/node/init/init.go:32 -Jul 29 15:53:58.626: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +Aug 24 12:01:37.246: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods Extended test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Pods Extended dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Pods Extended tear down framework | framework.go:193 -STEP: Destroying namespace "crd-publish-openapi-6533" for this suite. 07/29/23 15:53:58.648 +STEP: Destroying namespace "pods-5142" for this suite. 
08/24/23 12:01:37.261 ------------------------------ -• [SLOW TEST] [9.482 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - works for CRD preserving unknown fields in an embedded object [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:236 +• [0.104 seconds] +[sig-node] Pods Extended +test/e2e/node/framework.go:23 + Pods Set QOS Class + test/e2e/node/pods.go:150 + should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] + test/e2e/node/pods.go:161 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Pods Extended set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:53:49.187 - Jul 29 15:53:49.187: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 15:53:49.189 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:49.217 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:49.224 - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:01:37.175 + Aug 24 12:01:37.175: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 12:01:37.177 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:37.209 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:37.216 + [BeforeEach] [sig-node] Pods Extended test/e2e/framework/metrics/init/init.go:31 - [It] works for CRD preserving unknown fields in an embedded object [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:236 - Jul 29 15:53:49.230: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 07/29/23 15:53:51.804 - Jul 29 15:53:51.805: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 --namespace=crd-publish-openapi-6533 create -f -' - Jul 29 15:53:53.341: INFO: stderr: "" - Jul 29 15:53:53.341: INFO: stdout: "e2e-test-crd-publish-openapi-3924-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" - Jul 29 15:53:53.341: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 --namespace=crd-publish-openapi-6533 delete e2e-test-crd-publish-openapi-3924-crds test-cr' - Jul 29 15:53:53.630: INFO: stderr: "" - Jul 29 15:53:53.630: INFO: stdout: "e2e-test-crd-publish-openapi-3924-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" - Jul 29 15:53:53.630: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 --namespace=crd-publish-openapi-6533 apply -f -' - Jul 29 15:53:54.084: INFO: stderr: "" - Jul 29 15:53:54.084: INFO: stdout: "e2e-test-crd-publish-openapi-3924-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" - Jul 29 15:53:54.084: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 --namespace=crd-publish-openapi-6533 delete e2e-test-crd-publish-openapi-3924-crds test-cr' - Jul 29 15:53:54.262: INFO: stderr: "" - Jul 29 
15:53:54.262: INFO: stdout: "e2e-test-crd-publish-openapi-3924-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" - STEP: kubectl explain works to explain CR 07/29/23 15:53:54.262 - Jul 29 15:53:54.262: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-6533 explain e2e-test-crd-publish-openapi-3924-crds' - Jul 29 15:53:55.472: INFO: stderr: "" - Jul 29 15:53:55.472: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-3924-crd\nVERSION: crd-publish-openapi-test-unknown-in-nested.example.com/v1\n\nDESCRIPTION:\n preserve-unknown-properties in nested field for Testing\n\nFIELDS:\n apiVersion\t<string>\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t<string>\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t<Object>\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<map[string]>\n Specification of Waldo\n\n status\t<map[string]>\n Status of Waldo\n\n" - [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] Pods Set QOS Class + test/e2e/node/pods.go:152 + [It] should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] + test/e2e/node/pods.go:161 + STEP: creating the pod 08/24/23 12:01:37.223 + STEP: submitting the pod to kubernetes 08/24/23 12:01:37.223 + STEP: verifying QOS class is set on the pod 08/24/23 12:01:37.237 + [AfterEach] [sig-node] Pods Extended test/e2e/framework/node/init/init.go:32 - Jul 29 15:53:58.626: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + Aug 24 12:01:37.246: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods Extended test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Pods Extended dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Pods Extended tear down framework | framework.go:193 - STEP: Destroying namespace "crd-publish-openapi-6533" for this suite. 07/29/23 15:53:58.648 + STEP: Destroying namespace "pods-5142" for this suite. 
08/24/23 12:01:37.261 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSS +SSSS ------------------------------ [sig-node] Container Lifecycle Hook when create a pod with lifecycle hook - should execute prestop exec hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:151 + should execute poststart http hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:167 [BeforeEach] [sig-node] Container Lifecycle Hook set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:53:58.671 -Jul 29 15:53:58.671: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-lifecycle-hook 07/29/23 15:53:58.673 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:58.71 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:58.719 +STEP: Creating a kubernetes client 08/24/23 12:01:37.282 +Aug 24 12:01:37.282: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-lifecycle-hook 08/24/23 12:01:37.285 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:37.317 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:37.322 [BeforeEach] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:31 [BeforeEach] when create a pod with lifecycle hook test/e2e/common/node/lifecycle_hook.go:77 -STEP: create the container to handle the HTTPGet hook request. 07/29/23 15:53:58.739 -Jul 29 15:53:58.757: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-454" to be "running and ready" -Jul 29 15:53:58.764: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 6.5937ms -Jul 29 15:53:58.764: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:54:00.776: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018272105s -Jul 29 15:54:00.776: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:54:02.782: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 4.024329196s -Jul 29 15:54:02.782: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) -Jul 29 15:54:02.782: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" -[It] should execute prestop exec hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:151 -STEP: create the pod with lifecycle hook 07/29/23 15:54:02.79 -Jul 29 15:54:02.805: INFO: Waiting up to 5m0s for pod "pod-with-prestop-exec-hook" in namespace "container-lifecycle-hook-454" to be "running and ready" -Jul 29 15:54:02.822: INFO: Pod "pod-with-prestop-exec-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 16.687031ms -Jul 29 15:54:02.822: INFO: The phase of Pod pod-with-prestop-exec-hook is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:54:04.835: INFO: Pod "pod-with-prestop-exec-hook": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.029613343s -Jul 29 15:54:04.835: INFO: The phase of Pod pod-with-prestop-exec-hook is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:54:06.830: INFO: Pod "pod-with-prestop-exec-hook": Phase="Running", Reason="", readiness=true. Elapsed: 4.024582714s -Jul 29 15:54:06.830: INFO: The phase of Pod pod-with-prestop-exec-hook is Running (Ready = true) -Jul 29 15:54:06.830: INFO: Pod "pod-with-prestop-exec-hook" satisfied condition "running and ready" -STEP: delete the pod with lifecycle hook 07/29/23 15:54:06.836 -Jul 29 15:54:06.849: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear -Jul 29 15:54:06.871: INFO: Pod pod-with-prestop-exec-hook still exists -Jul 29 15:54:08.873: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear -Jul 29 15:54:08.882: INFO: Pod pod-with-prestop-exec-hook no longer exists -STEP: check prestop hook 07/29/23 15:54:08.882 +STEP: create the container to handle the HTTPGet hook request. 08/24/23 12:01:37.333 +Aug 24 12:01:37.348: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-4257" to be "running and ready" +Aug 24 12:01:37.354: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 6.419517ms +Aug 24 12:01:37.355: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:01:39.361: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 2.013652621s +Aug 24 12:01:39.362: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) +Aug 24 12:01:39.362: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" +[It] should execute poststart http hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:167 +STEP: create the pod with lifecycle hook 08/24/23 12:01:39.367 +Aug 24 12:01:39.376: INFO: Waiting up to 5m0s for pod "pod-with-poststart-http-hook" in namespace "container-lifecycle-hook-4257" to be "running and ready" +Aug 24 12:01:39.385: INFO: Pod "pod-with-poststart-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 9.34406ms +Aug 24 12:01:39.385: INFO: The phase of Pod pod-with-poststart-http-hook is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:01:41.394: INFO: Pod "pod-with-poststart-http-hook": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.018131496s +Aug 24 12:01:41.394: INFO: The phase of Pod pod-with-poststart-http-hook is Running (Ready = true) +Aug 24 12:01:41.394: INFO: Pod "pod-with-poststart-http-hook" satisfied condition "running and ready" +STEP: check poststart hook 08/24/23 12:01:41.399 +STEP: delete the pod with lifecycle hook 08/24/23 12:01:41.432 +Aug 24 12:01:41.446: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Aug 24 12:01:41.453: INFO: Pod pod-with-poststart-http-hook still exists +Aug 24 12:01:43.453: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Aug 24 12:01:43.459: INFO: Pod pod-with-poststart-http-hook still exists +Aug 24 12:01:45.454: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Aug 24 12:01:45.462: INFO: Pod pod-with-poststart-http-hook no longer exists [AfterEach] [sig-node] Container Lifecycle Hook test/e2e/framework/node/init/init.go:32 -Jul 29 15:54:08.901: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:01:45.463: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook tear down framework | framework.go:193 -STEP: Destroying namespace "container-lifecycle-hook-454" for this suite. 07/29/23 15:54:08.914 +STEP: Destroying namespace "container-lifecycle-hook-4257" for this suite. 08/24/23 12:01:45.474 ------------------------------ -• [SLOW TEST] [10.258 seconds] +• [SLOW TEST] [8.205 seconds] [sig-node] Container Lifecycle Hook test/e2e/common/node/framework.go:23 when create a pod with lifecycle hook test/e2e/common/node/lifecycle_hook.go:46 - should execute prestop exec hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:151 + should execute poststart http hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:167 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-node] Container Lifecycle Hook set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:53:58.671 - Jul 29 15:53:58.671: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-lifecycle-hook 07/29/23 15:53:58.673 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:53:58.71 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:53:58.719 - [BeforeEach] [sig-node] Container Lifecycle Hook - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] when create a pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:77 - STEP: create the container to handle the HTTPGet hook request. 07/29/23 15:53:58.739 - Jul 29 15:53:58.757: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-454" to be "running and ready" - Jul 29 15:53:58.764: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 6.5937ms - Jul 29 15:53:58.764: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:54:00.776: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.018272105s - Jul 29 15:54:00.776: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:54:02.782: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 4.024329196s - Jul 29 15:54:02.782: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) - Jul 29 15:54:02.782: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" - [It] should execute prestop exec hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:151 - STEP: create the pod with lifecycle hook 07/29/23 15:54:02.79 - Jul 29 15:54:02.805: INFO: Waiting up to 5m0s for pod "pod-with-prestop-exec-hook" in namespace "container-lifecycle-hook-454" to be "running and ready" - Jul 29 15:54:02.822: INFO: Pod "pod-with-prestop-exec-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 16.687031ms - Jul 29 15:54:02.822: INFO: The phase of Pod pod-with-prestop-exec-hook is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:54:04.835: INFO: Pod "pod-with-prestop-exec-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 2.029613343s - Jul 29 15:54:04.835: INFO: The phase of Pod pod-with-prestop-exec-hook is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:54:06.830: INFO: Pod "pod-with-prestop-exec-hook": Phase="Running", Reason="", readiness=true. Elapsed: 4.024582714s - Jul 29 15:54:06.830: INFO: The phase of Pod pod-with-prestop-exec-hook is Running (Ready = true) - Jul 29 15:54:06.830: INFO: Pod "pod-with-prestop-exec-hook" satisfied condition "running and ready" - STEP: delete the pod with lifecycle hook 07/29/23 15:54:06.836 - Jul 29 15:54:06.849: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear - Jul 29 15:54:06.871: INFO: Pod pod-with-prestop-exec-hook still exists - Jul 29 15:54:08.873: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear - Jul 29 15:54:08.882: INFO: Pod pod-with-prestop-exec-hook no longer exists - STEP: check prestop hook 07/29/23 15:54:08.882 - [AfterEach] [sig-node] Container Lifecycle Hook - test/e2e/framework/node/init/init.go:32 - Jul 29 15:54:08.901: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook - tear down framework | framework.go:193 - STEP: Destroying namespace "container-lifecycle-hook-454" for this suite. 
07/29/23 15:54:08.914 - << End Captured GinkgoWriter Output ------------------------------- -SSSS ------------------------------- -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - custom resource defaulting for requests and from storage works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:269 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:54:08.934 -Jul 29 15:54:08.935: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 15:54:08.937 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:54:08.968 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:54:08.972 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:31 -[It] custom resource defaulting for requests and from storage works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:269 -Jul 29 15:54:08.978: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:32 -Jul 29 15:54:12.551: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - tear down framework | framework.go:193 -STEP: Destroying namespace "custom-resource-definition-63" for this suite. 
07/29/23 15:54:12.563 ------------------------------- -• [3.642 seconds] -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - custom resource defaulting for requests and from storage works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:269 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:54:08.934 - Jul 29 15:54:08.935: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 15:54:08.937 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:54:08.968 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:54:08.972 - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:31 - [It] custom resource defaulting for requests and from storage works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:269 - Jul 29 15:54:08.978: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:32 - Jul 29 15:54:12.551: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - tear down framework | framework.go:193 - STEP: Destroying namespace "custom-resource-definition-63" for this suite. 07/29/23 15:54:12.563 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-apps] Daemon set [Serial] - should verify changes to a daemon set status [Conformance] - test/e2e/apps/daemon_set.go:873 -[BeforeEach] [sig-apps] Daemon set [Serial] - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:54:12.585 -Jul 29 15:54:12.585: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename daemonsets 07/29/23 15:54:12.587 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:54:12.646 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:54:12.695 -[BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 -[It] should verify changes to a daemon set status [Conformance] - test/e2e/apps/daemon_set.go:873 -STEP: Creating simple DaemonSet "daemon-set" 07/29/23 15:54:12.766 -STEP: Check that daemon pods launch on every node of the cluster. 
07/29/23 15:54:12.776 -Jul 29 15:54:12.793: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 15:54:12.793: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:13.819: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 15:54:13.819: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:14.809: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:14.810: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:15.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:15.813: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:16.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:16.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:17.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:17.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:18.811: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:18.811: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:19.822: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:19.822: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:20.814: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:20.815: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:21.814: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:21.814: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:22.808: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:22.808: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:23.816: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:23.816: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:24.813: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:24.813: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:25.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:25.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:26.819: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:26.819: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:27.808: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:27.808: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:28.813: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:28.813: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:29.815: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:29.815: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:30.810: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:30.810: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 
1 -Jul 29 15:54:31.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:31.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:32.815: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:32.815: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:33.814: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:33.814: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:34.811: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:34.811: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:35.823: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:35.823: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:36.809: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:36.811: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:37.811: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:37.811: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:38.813: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:38.813: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:39.810: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:39.810: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:40.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 15:54:40.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 15:54:41.813: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 -Jul 29 15:54:41.813: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set -STEP: Getting /status 07/29/23 15:54:41.82 -Jul 29 15:54:41.829: INFO: Daemon Set daemon-set has Conditions: [] -STEP: updating the DaemonSet Status 07/29/23 15:54:41.829 -Jul 29 15:54:41.846: INFO: updatedStatus.Conditions: []v1.DaemonSetCondition{v1.DaemonSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} -STEP: watching for the daemon set status to be updated 07/29/23 15:54:41.846 -Jul 29 15:54:41.851: INFO: Observed &DaemonSet event: ADDED -Jul 29 15:54:41.852: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.852: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.853: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.853: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.854: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.854: INFO: Found daemon set daemon-set in namespace daemonsets-4949 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] -Jul 29 15:54:41.854: INFO: Daemon set daemon-set has an updated status -STEP: patching the DaemonSet Status 07/29/23 15:54:41.855 -STEP: watching for the daemon set status to be patched 07/29/23 15:54:41.869 -Jul 29 15:54:41.875: INFO: Observed &DaemonSet event: ADDED -Jul 29 15:54:41.875: INFO: Observed &DaemonSet 
event: MODIFIED -Jul 29 15:54:41.875: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.876: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.876: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.876: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.876: INFO: Observed daemon set daemon-set in namespace daemonsets-4949 with annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] -Jul 29 15:54:41.876: INFO: Observed &DaemonSet event: MODIFIED -Jul 29 15:54:41.876: INFO: Found daemon set daemon-set in namespace daemonsets-4949 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusPatched True 0001-01-01 00:00:00 +0000 UTC }] -Jul 29 15:54:41.876: INFO: Daemon set daemon-set has a patched status -[AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 -STEP: Deleting DaemonSet "daemon-set" 07/29/23 15:54:41.884 -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-4949, will wait for the garbage collector to delete the pods 07/29/23 15:54:41.884 -Jul 29 15:54:41.953: INFO: Deleting DaemonSet.extensions daemon-set took: 11.948973ms -Jul 29 15:54:42.054: INFO: Terminating DaemonSet.extensions daemon-set pods took: 101.220138ms -Jul 29 15:54:44.764: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 15:54:44.765: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set -Jul 29 15:54:44.771: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"9235"},"items":null} - -Jul 29 15:54:44.777: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"9235"},"items":null} - -[AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/framework/node/init/init.go:32 -Jul 29 15:54:44.804: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] - tear down framework | framework.go:193 -STEP: Destroying namespace "daemonsets-4949" for this suite. 
07/29/23 15:54:44.814 ------------------------------- -• [SLOW TEST] [32.246 seconds] -[sig-apps] Daemon set [Serial] -test/e2e/apps/framework.go:23 - should verify changes to a daemon set status [Conformance] - test/e2e/apps/daemon_set.go:873 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Daemon set [Serial] - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:54:12.585 - Jul 29 15:54:12.585: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename daemonsets 07/29/23 15:54:12.587 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:54:12.646 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:54:12.695 - [BeforeEach] [sig-apps] Daemon set [Serial] + STEP: Creating a kubernetes client 08/24/23 12:01:37.282 + Aug 24 12:01:37.282: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-lifecycle-hook 08/24/23 12:01:37.285 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:37.317 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:37.322 + [BeforeEach] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 - [It] should verify changes to a daemon set status [Conformance] - test/e2e/apps/daemon_set.go:873 - STEP: Creating simple DaemonSet "daemon-set" 07/29/23 15:54:12.766 - STEP: Check that daemon pods launch on every node of the cluster. 07/29/23 15:54:12.776 - Jul 29 15:54:12.793: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 15:54:12.793: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:13.819: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 15:54:13.819: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:14.809: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:14.810: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:15.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:15.813: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:16.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:16.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:17.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:17.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:18.811: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:18.811: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:19.822: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:19.822: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:20.814: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:20.815: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:21.814: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:21.814: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 
29 15:54:22.808: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:22.808: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:23.816: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:23.816: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:24.813: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:24.813: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:25.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:25.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:26.819: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:26.819: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:27.808: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:27.808: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:28.813: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:28.813: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:29.815: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:29.815: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:30.810: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:30.810: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:31.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:31.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:32.815: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:32.815: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:33.814: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:33.814: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:34.811: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:34.811: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:35.823: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:35.823: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:36.809: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:36.811: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:37.811: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:37.811: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:38.813: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:38.813: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:39.810: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:39.810: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 15:54:40.812: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 15:54:40.812: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, 
expected 1 - Jul 29 15:54:41.813: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 - Jul 29 15:54:41.813: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set - STEP: Getting /status 07/29/23 15:54:41.82 - Jul 29 15:54:41.829: INFO: Daemon Set daemon-set has Conditions: [] - STEP: updating the DaemonSet Status 07/29/23 15:54:41.829 - Jul 29 15:54:41.846: INFO: updatedStatus.Conditions: []v1.DaemonSetCondition{v1.DaemonSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} - STEP: watching for the daemon set status to be updated 07/29/23 15:54:41.846 - Jul 29 15:54:41.851: INFO: Observed &DaemonSet event: ADDED - Jul 29 15:54:41.852: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.852: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.853: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.853: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.854: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.854: INFO: Found daemon set daemon-set in namespace daemonsets-4949 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] - Jul 29 15:54:41.854: INFO: Daemon set daemon-set has an updated status - STEP: patching the DaemonSet Status 07/29/23 15:54:41.855 - STEP: watching for the daemon set status to be patched 07/29/23 15:54:41.869 - Jul 29 15:54:41.875: INFO: Observed &DaemonSet event: ADDED - Jul 29 15:54:41.875: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.875: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.876: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.876: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.876: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.876: INFO: Observed daemon set daemon-set in namespace daemonsets-4949 with annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] - Jul 29 15:54:41.876: INFO: Observed &DaemonSet event: MODIFIED - Jul 29 15:54:41.876: INFO: Found daemon set daemon-set in namespace daemonsets-4949 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusPatched True 0001-01-01 00:00:00 +0000 UTC }] - Jul 29 15:54:41.876: INFO: Daemon set daemon-set has a patched status - [AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 - STEP: Deleting DaemonSet "daemon-set" 07/29/23 15:54:41.884 - STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-4949, will wait for the garbage collector to delete the pods 07/29/23 15:54:41.884 - Jul 29 15:54:41.953: INFO: Deleting DaemonSet.extensions daemon-set took: 11.948973ms - Jul 29 15:54:42.054: INFO: Terminating DaemonSet.extensions daemon-set pods took: 101.220138ms - Jul 29 15:54:44.764: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 15:54:44.765: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set - Jul 29 15:54:44.771: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"9235"},"items":null} - - Jul 29 15:54:44.777: INFO: pods: 
{"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"9235"},"items":null} - - [AfterEach] [sig-apps] Daemon set [Serial] + [BeforeEach] when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:77 + STEP: create the container to handle the HTTPGet hook request. 08/24/23 12:01:37.333 + Aug 24 12:01:37.348: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-4257" to be "running and ready" + Aug 24 12:01:37.354: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 6.419517ms + Aug 24 12:01:37.355: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:01:39.361: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 2.013652621s + Aug 24 12:01:39.362: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) + Aug 24 12:01:39.362: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" + [It] should execute poststart http hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:167 + STEP: create the pod with lifecycle hook 08/24/23 12:01:39.367 + Aug 24 12:01:39.376: INFO: Waiting up to 5m0s for pod "pod-with-poststart-http-hook" in namespace "container-lifecycle-hook-4257" to be "running and ready" + Aug 24 12:01:39.385: INFO: Pod "pod-with-poststart-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 9.34406ms + Aug 24 12:01:39.385: INFO: The phase of Pod pod-with-poststart-http-hook is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:01:41.394: INFO: Pod "pod-with-poststart-http-hook": Phase="Running", Reason="", readiness=true. Elapsed: 2.018131496s + Aug 24 12:01:41.394: INFO: The phase of Pod pod-with-poststart-http-hook is Running (Ready = true) + Aug 24 12:01:41.394: INFO: Pod "pod-with-poststart-http-hook" satisfied condition "running and ready" + STEP: check poststart hook 08/24/23 12:01:41.399 + STEP: delete the pod with lifecycle hook 08/24/23 12:01:41.432 + Aug 24 12:01:41.446: INFO: Waiting for pod pod-with-poststart-http-hook to disappear + Aug 24 12:01:41.453: INFO: Pod pod-with-poststart-http-hook still exists + Aug 24 12:01:43.453: INFO: Waiting for pod pod-with-poststart-http-hook to disappear + Aug 24 12:01:43.459: INFO: Pod pod-with-poststart-http-hook still exists + Aug 24 12:01:45.454: INFO: Waiting for pod pod-with-poststart-http-hook to disappear + Aug 24 12:01:45.462: INFO: Pod pod-with-poststart-http-hook no longer exists + [AfterEach] [sig-node] Container Lifecycle Hook test/e2e/framework/node/init/init.go:32 - Jul 29 15:54:44.804: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + Aug 24 12:01:45.463: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook tear down framework | framework.go:193 - STEP: Destroying namespace "daemonsets-4949" for this suite. 07/29/23 15:54:44.814 + STEP: Destroying namespace "container-lifecycle-hook-4257" for this suite. 
08/24/23 12:01:45.474 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSS +S ------------------------------ -[sig-api-machinery] ResourceQuota - should verify ResourceQuota with best effort scope. [Conformance] - test/e2e/apimachinery/resource_quota.go:803 -[BeforeEach] [sig-api-machinery] ResourceQuota +[sig-cli] Kubectl client Proxy server + should support --unix-socket=/path [Conformance] + test/e2e/kubectl/kubectl.go:1812 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:54:44.834 -Jul 29 15:54:44.834: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 15:54:44.839 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:54:44.871 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:54:44.878 -[BeforeEach] [sig-api-machinery] ResourceQuota +STEP: Creating a kubernetes client 08/24/23 12:01:45.488 +Aug 24 12:01:45.489: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:01:45.491 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:45.527 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:45.534 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[It] should verify ResourceQuota with best effort scope. [Conformance] - test/e2e/apimachinery/resource_quota.go:803 -STEP: Creating a ResourceQuota with best effort scope 07/29/23 15:54:44.882 -STEP: Ensuring ResourceQuota status is calculated 07/29/23 15:54:44.892 -STEP: Creating a ResourceQuota with not best effort scope 07/29/23 15:54:46.898 -STEP: Ensuring ResourceQuota status is calculated 07/29/23 15:54:46.921 -STEP: Creating a best-effort pod 07/29/23 15:54:48.929 -STEP: Ensuring resource quota with best effort scope captures the pod usage 07/29/23 15:54:48.961 -STEP: Ensuring resource quota with not best effort ignored the pod usage 07/29/23 15:54:50.97 -STEP: Deleting the pod 07/29/23 15:54:52.981 -STEP: Ensuring resource quota status released the pod usage 07/29/23 15:54:53.001 -STEP: Creating a not best-effort pod 07/29/23 15:54:55.009 -STEP: Ensuring resource quota with not best effort scope captures the pod usage 07/29/23 15:54:55.029 -STEP: Ensuring resource quota with best effort scope ignored the pod usage 07/29/23 15:54:57.038 -STEP: Deleting the pod 07/29/23 15:54:59.045 -STEP: Ensuring resource quota status released the pod usage 07/29/23 15:54:59.064 -[AfterEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[It] should support --unix-socket=/path [Conformance] + test/e2e/kubectl/kubectl.go:1812 +STEP: Starting the proxy 08/24/23 12:01:45.539 +Aug 24 12:01:45.541: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6656 proxy --unix-socket=/tmp/kubectl-proxy-unix3741190031/test' +STEP: retrieving proxy /api/ output 08/24/23 12:01:45.665 +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 15:55:01.078: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 12:01:45.669: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client 
test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-1894" for this suite. 07/29/23 15:55:01.095 +STEP: Destroying namespace "kubectl-6656" for this suite. 08/24/23 12:01:45.678 ------------------------------ -• [SLOW TEST] [16.276 seconds] -[sig-api-machinery] ResourceQuota -test/e2e/apimachinery/framework.go:23 - should verify ResourceQuota with best effort scope. [Conformance] - test/e2e/apimachinery/resource_quota.go:803 +• [0.203 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Proxy server + test/e2e/kubectl/kubectl.go:1780 + should support --unix-socket=/path [Conformance] + test/e2e/kubectl/kubectl.go:1812 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:54:44.834 - Jul 29 15:54:44.834: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 15:54:44.839 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:54:44.871 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:54:44.878 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 12:01:45.488 + Aug 24 12:01:45.489: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:01:45.491 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:45.527 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:45.534 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [It] should verify ResourceQuota with best effort scope. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:803 - STEP: Creating a ResourceQuota with best effort scope 07/29/23 15:54:44.882 - STEP: Ensuring ResourceQuota status is calculated 07/29/23 15:54:44.892 - STEP: Creating a ResourceQuota with not best effort scope 07/29/23 15:54:46.898 - STEP: Ensuring ResourceQuota status is calculated 07/29/23 15:54:46.921 - STEP: Creating a best-effort pod 07/29/23 15:54:48.929 - STEP: Ensuring resource quota with best effort scope captures the pod usage 07/29/23 15:54:48.961 - STEP: Ensuring resource quota with not best effort ignored the pod usage 07/29/23 15:54:50.97 - STEP: Deleting the pod 07/29/23 15:54:52.981 - STEP: Ensuring resource quota status released the pod usage 07/29/23 15:54:53.001 - STEP: Creating a not best-effort pod 07/29/23 15:54:55.009 - STEP: Ensuring resource quota with not best effort scope captures the pod usage 07/29/23 15:54:55.029 - STEP: Ensuring resource quota with best effort scope ignored the pod usage 07/29/23 15:54:57.038 - STEP: Deleting the pod 07/29/23 15:54:59.045 - STEP: Ensuring resource quota status released the pod usage 07/29/23 15:54:59.064 - [AfterEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [It] should support --unix-socket=/path [Conformance] + test/e2e/kubectl/kubectl.go:1812 + STEP: Starting the proxy 08/24/23 12:01:45.539 + Aug 24 12:01:45.541: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-6656 proxy --unix-socket=/tmp/kubectl-proxy-unix3741190031/test' + STEP: retrieving proxy /api/ output 08/24/23 12:01:45.665 + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 15:55:01.078: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 12:01:45.669: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-1894" for this suite. 07/29/23 15:55:01.095 + STEP: Destroying namespace "kubectl-6656" for this suite. 
08/24/23 12:01:45.678 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2250 -[BeforeEach] [sig-network] Services +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:89 +[BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:55:01.114 -Jul 29 15:55:01.114: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 15:55:01.116 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:01.151 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:01.158 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 12:01:45.696 +Aug 24 12:01:45.696: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 12:01:45.699 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:45.728 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:45.733 +[BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2250 -STEP: creating service in namespace services-9048 07/29/23 15:55:01.164 -STEP: creating service affinity-nodeport-transition in namespace services-9048 07/29/23 15:55:01.165 -STEP: creating replication controller affinity-nodeport-transition in namespace services-9048 07/29/23 15:55:01.206 -I0729 15:55:01.218825 13 runners.go:193] Created replication controller with name: affinity-nodeport-transition, namespace: services-9048, replica count: 3 -I0729 15:55:04.270293 13 runners.go:193] affinity-nodeport-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Jul 29 15:55:04.293: INFO: Creating new exec pod -Jul 29 15:55:04.316: INFO: Waiting up to 5m0s for pod "execpod-affinityx49fs" in namespace "services-9048" to be "running" -Jul 29 15:55:04.324: INFO: Pod "execpod-affinityx49fs": Phase="Pending", Reason="", readiness=false. Elapsed: 7.003227ms -Jul 29 15:55:06.332: INFO: Pod "execpod-affinityx49fs": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.015230188s -Jul 29 15:55:06.332: INFO: Pod "execpod-affinityx49fs" satisfied condition "running" -Jul 29 15:55:07.344: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c nc -v -z -w 2 affinity-nodeport-transition 80' -Jul 29 15:55:07.649: INFO: stderr: "+ nc -v -z -w 2 affinity-nodeport-transition 80\nConnection to affinity-nodeport-transition 80 port [tcp/http] succeeded!\n" -Jul 29 15:55:07.649: INFO: stdout: "" -Jul 29 15:55:07.650: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c nc -v -z -w 2 10.233.51.120 80' -Jul 29 15:55:07.875: INFO: stderr: "+ nc -v -z -w 2 10.233.51.120 80\nConnection to 10.233.51.120 80 port [tcp/http] succeeded!\n" -Jul 29 15:55:07.875: INFO: stdout: "" -Jul 29 15:55:07.875: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c nc -v -z -w 2 192.168.121.120 31909' -Jul 29 15:55:08.113: INFO: stderr: "+ nc -v -z -w 2 192.168.121.120 31909\nConnection to 192.168.121.120 31909 port [tcp/*] succeeded!\n" -Jul 29 15:55:08.113: INFO: stdout: "" -Jul 29 15:55:08.113: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c nc -v -z -w 2 192.168.121.211 31909' -Jul 29 15:55:08.325: INFO: stderr: "+ nc -v -z -w 2 192.168.121.211 31909\nConnection to 192.168.121.211 31909 port [tcp/*] succeeded!\n" -Jul 29 15:55:08.325: INFO: stdout: "" -Jul 29 15:55:08.346: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.120:31909/ ; done' -Jul 29 15:55:08.827: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n" -Jul 29 15:55:08.827: INFO: stdout: 
"\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-h4h5v\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-h4h5v" -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-h4h5v -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c -Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-h4h5v -Jul 29 15:55:08.843: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.120:31909/ ; done' -Jul 29 15:55:09.265: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n" -Jul 29 15:55:09.265: INFO: 
stdout: "\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s" -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s -Jul 29 15:55:09.265: INFO: Cleaning up the exec pod -STEP: deleting ReplicationController affinity-nodeport-transition in namespace services-9048, will wait for the garbage collector to delete the pods 07/29/23 15:55:09.297 -Jul 29 15:55:09.384: INFO: Deleting ReplicationController affinity-nodeport-transition took: 16.868359ms -Jul 29 15:55:09.485: INFO: Terminating ReplicationController affinity-nodeport-transition pods took: 100.823659ms -[AfterEach] [sig-network] Services +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:89 +STEP: Creating configMap with name configmap-test-volume-map-8fd696fc-a305-4657-b734-7a06bfc81f62 08/24/23 12:01:45.737 +STEP: Creating a pod to test consume configMaps 08/24/23 12:01:45.747 +Aug 24 12:01:45.765: INFO: Waiting up to 5m0s for pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a" in namespace "configmap-6201" to be "Succeeded or Failed" +Aug 24 12:01:45.773: INFO: Pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a": Phase="Pending", Reason="", readiness=false. Elapsed: 7.029773ms +Aug 24 12:01:47.780: INFO: Pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014469325s +Aug 24 12:01:49.781: INFO: Pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.015868159s +STEP: Saw pod success 08/24/23 12:01:49.782 +Aug 24 12:01:49.782: INFO: Pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a" satisfied condition "Succeeded or Failed" +Aug 24 12:01:49.787: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a container agnhost-container: +STEP: delete the pod 08/24/23 12:01:49.801 +Aug 24 12:01:49.828: INFO: Waiting for pod pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a to disappear +Aug 24 12:01:49.835: INFO: Pod pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a no longer exists +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 15:55:11.977: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 12:01:49.835: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "services-9048" for this suite. 07/29/23 15:55:11.987 +STEP: Destroying namespace "configmap-6201" for this suite. 08/24/23 12:01:49.849 ------------------------------ -• [SLOW TEST] [10.910 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2250 +• [4.168 seconds] +[sig-storage] ConfigMap +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:89 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:55:01.114 - Jul 29 15:55:01.114: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 15:55:01.116 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:01.151 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:01.158 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 12:01:45.696 + Aug 24 12:01:45.696: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 12:01:45.699 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:45.728 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:45.733 + [BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2250 - STEP: creating service in namespace services-9048 07/29/23 15:55:01.164 - STEP: creating service affinity-nodeport-transition in namespace services-9048 07/29/23 15:55:01.165 - STEP: creating replication controller affinity-nodeport-transition in namespace services-9048 07/29/23 15:55:01.206 - I0729 15:55:01.218825 13 runners.go:193] Created replication controller with name: 
affinity-nodeport-transition, namespace: services-9048, replica count: 3 - I0729 15:55:04.270293 13 runners.go:193] affinity-nodeport-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady - Jul 29 15:55:04.293: INFO: Creating new exec pod - Jul 29 15:55:04.316: INFO: Waiting up to 5m0s for pod "execpod-affinityx49fs" in namespace "services-9048" to be "running" - Jul 29 15:55:04.324: INFO: Pod "execpod-affinityx49fs": Phase="Pending", Reason="", readiness=false. Elapsed: 7.003227ms - Jul 29 15:55:06.332: INFO: Pod "execpod-affinityx49fs": Phase="Running", Reason="", readiness=true. Elapsed: 2.015230188s - Jul 29 15:55:06.332: INFO: Pod "execpod-affinityx49fs" satisfied condition "running" - Jul 29 15:55:07.344: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c nc -v -z -w 2 affinity-nodeport-transition 80' - Jul 29 15:55:07.649: INFO: stderr: "+ nc -v -z -w 2 affinity-nodeport-transition 80\nConnection to affinity-nodeport-transition 80 port [tcp/http] succeeded!\n" - Jul 29 15:55:07.649: INFO: stdout: "" - Jul 29 15:55:07.650: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c nc -v -z -w 2 10.233.51.120 80' - Jul 29 15:55:07.875: INFO: stderr: "+ nc -v -z -w 2 10.233.51.120 80\nConnection to 10.233.51.120 80 port [tcp/http] succeeded!\n" - Jul 29 15:55:07.875: INFO: stdout: "" - Jul 29 15:55:07.875: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c nc -v -z -w 2 192.168.121.120 31909' - Jul 29 15:55:08.113: INFO: stderr: "+ nc -v -z -w 2 192.168.121.120 31909\nConnection to 192.168.121.120 31909 port [tcp/*] succeeded!\n" - Jul 29 15:55:08.113: INFO: stdout: "" - Jul 29 15:55:08.113: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c nc -v -z -w 2 192.168.121.211 31909' - Jul 29 15:55:08.325: INFO: stderr: "+ nc -v -z -w 2 192.168.121.211 31909\nConnection to 192.168.121.211 31909 port [tcp/*] succeeded!\n" - Jul 29 15:55:08.325: INFO: stdout: "" - Jul 29 15:55:08.346: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.120:31909/ ; done' - Jul 29 15:55:08.827: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s 
--connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n" - Jul 29 15:55:08.827: INFO: stdout: "\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-h4h5v\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-tqj4c\naffinity-nodeport-transition-h4h5v" - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-h4h5v - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-tqj4c - Jul 29 15:55:08.827: INFO: Received response from host: affinity-nodeport-transition-h4h5v - Jul 29 15:55:08.843: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9048 exec execpod-affinityx49fs -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.120:31909/ ; done' - Jul 29 15:55:09.265: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 
http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31909/\n" - Jul 29 15:55:09.265: INFO: stdout: "\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s\naffinity-nodeport-transition-bsw2s" - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Received response from host: affinity-nodeport-transition-bsw2s - Jul 29 15:55:09.265: INFO: Cleaning up the exec pod - STEP: deleting ReplicationController affinity-nodeport-transition in namespace services-9048, will wait for the garbage collector to delete the pods 07/29/23 15:55:09.297 - Jul 29 15:55:09.384: INFO: Deleting ReplicationController affinity-nodeport-transition took: 16.868359ms - Jul 29 15:55:09.485: INFO: Terminating ReplicationController affinity-nodeport-transition pods took: 100.823659ms - [AfterEach] [sig-network] Services + [It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:89 + STEP: Creating configMap with name configmap-test-volume-map-8fd696fc-a305-4657-b734-7a06bfc81f62 08/24/23 12:01:45.737 + STEP: Creating a pod to test consume configMaps 08/24/23 12:01:45.747 + Aug 24 12:01:45.765: INFO: Waiting up to 5m0s for pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a" in namespace "configmap-6201" to be "Succeeded or Failed" + Aug 24 12:01:45.773: INFO: Pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a": Phase="Pending", Reason="", readiness=false. 
Elapsed: 7.029773ms + Aug 24 12:01:47.780: INFO: Pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014469325s + Aug 24 12:01:49.781: INFO: Pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015868159s + STEP: Saw pod success 08/24/23 12:01:49.782 + Aug 24 12:01:49.782: INFO: Pod "pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a" satisfied condition "Succeeded or Failed" + Aug 24 12:01:49.787: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a container agnhost-container: + STEP: delete the pod 08/24/23 12:01:49.801 + Aug 24 12:01:49.828: INFO: Waiting for pod pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a to disappear + Aug 24 12:01:49.835: INFO: Pod pod-configmaps-325b6199-ad91-4c71-b596-ba6803b78c8a no longer exists + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 15:55:11.977: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 12:01:49.835: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "services-9048" for this suite. 07/29/23 15:55:11.987 + STEP: Destroying namespace "configmap-6201" for this suite. 08/24/23 12:01:49.849 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +S ------------------------------ [sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance] test/e2e/kubectl/kubectl.go:1415 [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:55:12.032 -Jul 29 15:55:12.032: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 15:55:12.036 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:12.068 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:12.073 +STEP: Creating a kubernetes client 08/24/23 12:01:49.869 +Aug 24 12:01:49.869: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:01:49.871 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:49.902 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:49.908 [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-cli] Kubectl client test/e2e/kubectl/kubectl.go:274 [It] should create services for rc [Conformance] test/e2e/kubectl/kubectl.go:1415 -STEP: creating Agnhost RC 07/29/23 15:55:12.079 -Jul 29 15:55:12.079: INFO: namespace kubectl-9853 -Jul 29 15:55:12.080: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9853 create -f -' -Jul 29 15:55:13.870: INFO: stderr: "" -Jul 29 15:55:13.870: INFO: stdout: "replicationcontroller/agnhost-primary created\n" -STEP: Waiting for Agnhost primary to start. 
07/29/23 15:55:13.87 -Jul 29 15:55:14.883: INFO: Selector matched 1 pods for map[app:agnhost] -Jul 29 15:55:14.883: INFO: Found 0 / 1 -Jul 29 15:55:15.932: INFO: Selector matched 1 pods for map[app:agnhost] -Jul 29 15:55:15.932: INFO: Found 1 / 1 -Jul 29 15:55:15.932: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 -Jul 29 15:55:15.939: INFO: Selector matched 1 pods for map[app:agnhost] -Jul 29 15:55:15.939: INFO: ForEach: Found 1 pods from the filter. Now looping through them. -Jul 29 15:55:15.939: INFO: wait on agnhost-primary startup in kubectl-9853 -Jul 29 15:55:15.939: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9853 logs agnhost-primary-8k2hf agnhost-primary' -Jul 29 15:55:16.120: INFO: stderr: "" -Jul 29 15:55:16.121: INFO: stdout: "Paused\n" -STEP: exposing RC 07/29/23 15:55:16.121 -Jul 29 15:55:16.122: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9853 expose rc agnhost-primary --name=rm2 --port=1234 --target-port=6379' -Jul 29 15:55:16.328: INFO: stderr: "" -Jul 29 15:55:16.328: INFO: stdout: "service/rm2 exposed\n" -Jul 29 15:55:16.336: INFO: Service rm2 in namespace kubectl-9853 found. -STEP: exposing service 07/29/23 15:55:18.352 -Jul 29 15:55:18.353: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9853 expose service rm2 --name=rm3 --port=2345 --target-port=6379' -Jul 29 15:55:18.551: INFO: stderr: "" -Jul 29 15:55:18.551: INFO: stdout: "service/rm3 exposed\n" -Jul 29 15:55:18.560: INFO: Service rm3 in namespace kubectl-9853 found. +STEP: creating Agnhost RC 08/24/23 12:01:49.916 +Aug 24 12:01:49.917: INFO: namespace kubectl-8510 +Aug 24 12:01:49.917: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8510 create -f -' +Aug 24 12:01:50.466: INFO: stderr: "" +Aug 24 12:01:50.466: INFO: stdout: "replicationcontroller/agnhost-primary created\n" +STEP: Waiting for Agnhost primary to start. 08/24/23 12:01:50.466 +Aug 24 12:01:51.482: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:01:51.482: INFO: Found 0 / 1 +Aug 24 12:01:52.475: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:01:52.475: INFO: Found 1 / 1 +Aug 24 12:01:52.475: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +Aug 24 12:01:52.480: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:01:52.480: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +Aug 24 12:01:52.480: INFO: wait on agnhost-primary startup in kubectl-8510 +Aug 24 12:01:52.481: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8510 logs agnhost-primary-8m9gp agnhost-primary' +Aug 24 12:01:52.660: INFO: stderr: "" +Aug 24 12:01:52.660: INFO: stdout: "Paused\n" +STEP: exposing RC 08/24/23 12:01:52.66 +Aug 24 12:01:52.661: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8510 expose rc agnhost-primary --name=rm2 --port=1234 --target-port=6379' +Aug 24 12:01:52.858: INFO: stderr: "" +Aug 24 12:01:52.858: INFO: stdout: "service/rm2 exposed\n" +Aug 24 12:01:52.873: INFO: Service rm2 in namespace kubectl-8510 found. 
+STEP: exposing service 08/24/23 12:01:54.889 +Aug 24 12:01:54.890: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8510 expose service rm2 --name=rm3 --port=2345 --target-port=6379' +Aug 24 12:01:55.138: INFO: stderr: "" +Aug 24 12:01:55.138: INFO: stdout: "service/rm3 exposed\n" +Aug 24 12:01:55.149: INFO: Service rm3 in namespace kubectl-8510 found. [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 15:55:20.572: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:01:57.161: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-9853" for this suite. 07/29/23 15:55:20.582 +STEP: Destroying namespace "kubectl-8510" for this suite. 08/24/23 12:01:57.17 ------------------------------ -• [SLOW TEST] [8.562 seconds] +• [SLOW TEST] [7.316 seconds] [sig-cli] Kubectl client test/e2e/kubectl/framework.go:23 Kubectl expose @@ -6686,4928 +6700,4725 @@ test/e2e/kubectl/framework.go:23 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:55:12.032 - Jul 29 15:55:12.032: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 15:55:12.036 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:12.068 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:12.073 + STEP: Creating a kubernetes client 08/24/23 12:01:49.869 + Aug 24 12:01:49.869: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:01:49.871 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:49.902 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:49.908 [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-cli] Kubectl client test/e2e/kubectl/kubectl.go:274 [It] should create services for rc [Conformance] test/e2e/kubectl/kubectl.go:1415 - STEP: creating Agnhost RC 07/29/23 15:55:12.079 - Jul 29 15:55:12.079: INFO: namespace kubectl-9853 - Jul 29 15:55:12.080: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9853 create -f -' - Jul 29 15:55:13.870: INFO: stderr: "" - Jul 29 15:55:13.870: INFO: stdout: "replicationcontroller/agnhost-primary created\n" - STEP: Waiting for Agnhost primary to start. 07/29/23 15:55:13.87 - Jul 29 15:55:14.883: INFO: Selector matched 1 pods for map[app:agnhost] - Jul 29 15:55:14.883: INFO: Found 0 / 1 - Jul 29 15:55:15.932: INFO: Selector matched 1 pods for map[app:agnhost] - Jul 29 15:55:15.932: INFO: Found 1 / 1 - Jul 29 15:55:15.932: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 - Jul 29 15:55:15.939: INFO: Selector matched 1 pods for map[app:agnhost] - Jul 29 15:55:15.939: INFO: ForEach: Found 1 pods from the filter. Now looping through them. 
- Jul 29 15:55:15.939: INFO: wait on agnhost-primary startup in kubectl-9853 - Jul 29 15:55:15.939: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9853 logs agnhost-primary-8k2hf agnhost-primary' - Jul 29 15:55:16.120: INFO: stderr: "" - Jul 29 15:55:16.121: INFO: stdout: "Paused\n" - STEP: exposing RC 07/29/23 15:55:16.121 - Jul 29 15:55:16.122: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9853 expose rc agnhost-primary --name=rm2 --port=1234 --target-port=6379' - Jul 29 15:55:16.328: INFO: stderr: "" - Jul 29 15:55:16.328: INFO: stdout: "service/rm2 exposed\n" - Jul 29 15:55:16.336: INFO: Service rm2 in namespace kubectl-9853 found. - STEP: exposing service 07/29/23 15:55:18.352 - Jul 29 15:55:18.353: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9853 expose service rm2 --name=rm3 --port=2345 --target-port=6379' - Jul 29 15:55:18.551: INFO: stderr: "" - Jul 29 15:55:18.551: INFO: stdout: "service/rm3 exposed\n" - Jul 29 15:55:18.560: INFO: Service rm3 in namespace kubectl-9853 found. + STEP: creating Agnhost RC 08/24/23 12:01:49.916 + Aug 24 12:01:49.917: INFO: namespace kubectl-8510 + Aug 24 12:01:49.917: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8510 create -f -' + Aug 24 12:01:50.466: INFO: stderr: "" + Aug 24 12:01:50.466: INFO: stdout: "replicationcontroller/agnhost-primary created\n" + STEP: Waiting for Agnhost primary to start. 08/24/23 12:01:50.466 + Aug 24 12:01:51.482: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:01:51.482: INFO: Found 0 / 1 + Aug 24 12:01:52.475: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:01:52.475: INFO: Found 1 / 1 + Aug 24 12:01:52.475: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 + Aug 24 12:01:52.480: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:01:52.480: INFO: ForEach: Found 1 pods from the filter. Now looping through them. + Aug 24 12:01:52.480: INFO: wait on agnhost-primary startup in kubectl-8510 + Aug 24 12:01:52.481: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8510 logs agnhost-primary-8m9gp agnhost-primary' + Aug 24 12:01:52.660: INFO: stderr: "" + Aug 24 12:01:52.660: INFO: stdout: "Paused\n" + STEP: exposing RC 08/24/23 12:01:52.66 + Aug 24 12:01:52.661: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8510 expose rc agnhost-primary --name=rm2 --port=1234 --target-port=6379' + Aug 24 12:01:52.858: INFO: stderr: "" + Aug 24 12:01:52.858: INFO: stdout: "service/rm2 exposed\n" + Aug 24 12:01:52.873: INFO: Service rm2 in namespace kubectl-8510 found. + STEP: exposing service 08/24/23 12:01:54.889 + Aug 24 12:01:54.890: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8510 expose service rm2 --name=rm3 --port=2345 --target-port=6379' + Aug 24 12:01:55.138: INFO: stderr: "" + Aug 24 12:01:55.138: INFO: stdout: "service/rm3 exposed\n" + Aug 24 12:01:55.149: INFO: Service rm3 in namespace kubectl-8510 found. 
[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 15:55:20.572: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:01:57.161: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-9853" for this suite. 07/29/23 15:55:20.582 + STEP: Destroying namespace "kubectl-8510" for this suite. 08/24/23 12:01:57.17 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Subpath Atomic writer volumes - should support subpaths with configmap pod [Conformance] - test/e2e/storage/subpath.go:70 -[BeforeEach] [sig-storage] Subpath +[sig-cli] Kubectl client Update Demo + should create and stop a replication controller [Conformance] + test/e2e/kubectl/kubectl.go:339 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:55:20.596 -Jul 29 15:55:20.597: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename subpath 07/29/23 15:55:20.6 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:20.634 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:20.639 -[BeforeEach] [sig-storage] Subpath +STEP: Creating a kubernetes client 08/24/23 12:01:57.188 +Aug 24 12:01:57.188: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:01:57.19 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:57.22 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:57.225 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 -STEP: Setting up data 07/29/23 15:55:20.644 -[It] should support subpaths with configmap pod [Conformance] - test/e2e/storage/subpath.go:70 -STEP: Creating pod pod-subpath-test-configmap-fjx4 07/29/23 15:55:20.661 -STEP: Creating a pod to test atomic-volume-subpath 07/29/23 15:55:20.661 -Jul 29 15:55:20.685: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-fjx4" in namespace "subpath-9644" to be "Succeeded or Failed" -Jul 29 15:55:20.697: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Pending", Reason="", readiness=false. Elapsed: 11.544539ms -Jul 29 15:55:22.703: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 2.018144861s -Jul 29 15:55:24.708: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 4.022527583s -Jul 29 15:55:26.706: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 6.020967371s -Jul 29 15:55:28.716: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 8.03128047s -Jul 29 15:55:30.706: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 10.020926569s -Jul 29 15:55:32.705: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. 
Elapsed: 12.019782466s -Jul 29 15:55:34.712: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 14.027393867s -Jul 29 15:55:36.705: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 16.020364537s -Jul 29 15:55:38.707: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 18.021628893s -Jul 29 15:55:40.709: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 20.023876887s -Jul 29 15:55:42.706: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=false. Elapsed: 22.020594319s -Jul 29 15:55:44.707: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.022388813s -STEP: Saw pod success 07/29/23 15:55:44.708 -Jul 29 15:55:44.708: INFO: Pod "pod-subpath-test-configmap-fjx4" satisfied condition "Succeeded or Failed" -Jul 29 15:55:44.715: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-configmap-fjx4 container test-container-subpath-configmap-fjx4: -STEP: delete the pod 07/29/23 15:55:44.748 -Jul 29 15:55:44.769: INFO: Waiting for pod pod-subpath-test-configmap-fjx4 to disappear -Jul 29 15:55:44.777: INFO: Pod pod-subpath-test-configmap-fjx4 no longer exists -STEP: Deleting pod pod-subpath-test-configmap-fjx4 07/29/23 15:55:44.777 -Jul 29 15:55:44.777: INFO: Deleting pod "pod-subpath-test-configmap-fjx4" in namespace "subpath-9644" -[AfterEach] [sig-storage] Subpath +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[BeforeEach] Update Demo + test/e2e/kubectl/kubectl.go:326 +[It] should create and stop a replication controller [Conformance] + test/e2e/kubectl/kubectl.go:339 +STEP: creating a replication controller 08/24/23 12:01:57.23 +Aug 24 12:01:57.231: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 create -f -' +Aug 24 12:01:57.639: INFO: stderr: "" +Aug 24 12:01:57.639: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. 08/24/23 12:01:57.639 +Aug 24 12:01:57.640: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Aug 24 12:01:57.823: INFO: stderr: "" +Aug 24 12:01:57.823: INFO: stdout: "update-demo-nautilus-wxkl7 update-demo-nautilus-z8qwl " +Aug 24 12:01:57.823: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-wxkl7 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:01:57.985: INFO: stderr: "" +Aug 24 12:01:57.985: INFO: stdout: "" +Aug 24 12:01:57.985: INFO: update-demo-nautilus-wxkl7 is created but not running +Aug 24 12:02:02.986: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Aug 24 12:02:03.159: INFO: stderr: "" +Aug 24 12:02:03.159: INFO: stdout: "update-demo-nautilus-wxkl7 update-demo-nautilus-z8qwl " +Aug 24 12:02:03.159: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-wxkl7 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:02:03.340: INFO: stderr: "" +Aug 24 12:02:03.340: INFO: stdout: "true" +Aug 24 12:02:03.341: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-wxkl7 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Aug 24 12:02:03.510: INFO: stderr: "" +Aug 24 12:02:03.510: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" +Aug 24 12:02:03.510: INFO: validating pod update-demo-nautilus-wxkl7 +Aug 24 12:02:03.524: INFO: got data: { + "image": "nautilus.jpg" +} + +Aug 24 12:02:03.524: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Aug 24 12:02:03.524: INFO: update-demo-nautilus-wxkl7 is verified up and running +Aug 24 12:02:03.525: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-z8qwl -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Aug 24 12:02:03.662: INFO: stderr: "" +Aug 24 12:02:03.662: INFO: stdout: "true" +Aug 24 12:02:03.663: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-z8qwl -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Aug 24 12:02:03.801: INFO: stderr: "" +Aug 24 12:02:03.801: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" +Aug 24 12:02:03.801: INFO: validating pod update-demo-nautilus-z8qwl +Aug 24 12:02:03.815: INFO: got data: { + "image": "nautilus.jpg" +} + +Aug 24 12:02:03.815: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Aug 24 12:02:03.815: INFO: update-demo-nautilus-z8qwl is verified up and running +STEP: using delete to clean up resources 08/24/23 12:02:03.815 +Aug 24 12:02:03.816: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 delete --grace-period=0 --force -f -' +Aug 24 12:02:04.000: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Aug 24 12:02:04.000: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +Aug 24 12:02:04.000: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get rc,svc -l name=update-demo --no-headers' +Aug 24 12:02:04.230: INFO: stderr: "No resources found in kubectl-3825 namespace.\n" +Aug 24 12:02:04.230: INFO: stdout: "" +Aug 24 12:02:04.231: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Aug 24 12:02:04.419: INFO: stderr: "" +Aug 24 12:02:04.419: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 15:55:44.784: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Subpath +Aug 24 12:02:04.419: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "subpath-9644" for this suite. 07/29/23 15:55:44.792 +STEP: Destroying namespace "kubectl-3825" for this suite. 08/24/23 12:02:04.432 ------------------------------ -• [SLOW TEST] [24.207 seconds] -[sig-storage] Subpath -test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - test/e2e/storage/subpath.go:36 - should support subpaths with configmap pod [Conformance] - test/e2e/storage/subpath.go:70 +• [SLOW TEST] [7.260 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Update Demo + test/e2e/kubectl/kubectl.go:324 + should create and stop a replication controller [Conformance] + test/e2e/kubectl/kubectl.go:339 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Subpath + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:55:20.596 - Jul 29 15:55:20.597: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename subpath 07/29/23 15:55:20.6 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:20.634 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:20.639 - [BeforeEach] [sig-storage] Subpath + STEP: Creating a kubernetes client 08/24/23 12:01:57.188 + Aug 24 12:01:57.188: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:01:57.19 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:01:57.22 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:01:57.225 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 - STEP: Setting up data 07/29/23 15:55:20.644 - [It] should support subpaths with configmap pod [Conformance] - test/e2e/storage/subpath.go:70 - STEP: Creating pod pod-subpath-test-configmap-fjx4 07/29/23 15:55:20.661 - STEP: Creating a pod to test atomic-volume-subpath 07/29/23 
15:55:20.661 - Jul 29 15:55:20.685: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-fjx4" in namespace "subpath-9644" to be "Succeeded or Failed" - Jul 29 15:55:20.697: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Pending", Reason="", readiness=false. Elapsed: 11.544539ms - Jul 29 15:55:22.703: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 2.018144861s - Jul 29 15:55:24.708: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 4.022527583s - Jul 29 15:55:26.706: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 6.020967371s - Jul 29 15:55:28.716: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 8.03128047s - Jul 29 15:55:30.706: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 10.020926569s - Jul 29 15:55:32.705: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 12.019782466s - Jul 29 15:55:34.712: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 14.027393867s - Jul 29 15:55:36.705: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 16.020364537s - Jul 29 15:55:38.707: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 18.021628893s - Jul 29 15:55:40.709: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=true. Elapsed: 20.023876887s - Jul 29 15:55:42.706: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Running", Reason="", readiness=false. Elapsed: 22.020594319s - Jul 29 15:55:44.707: INFO: Pod "pod-subpath-test-configmap-fjx4": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.022388813s - STEP: Saw pod success 07/29/23 15:55:44.708 - Jul 29 15:55:44.708: INFO: Pod "pod-subpath-test-configmap-fjx4" satisfied condition "Succeeded or Failed" - Jul 29 15:55:44.715: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-configmap-fjx4 container test-container-subpath-configmap-fjx4: - STEP: delete the pod 07/29/23 15:55:44.748 - Jul 29 15:55:44.769: INFO: Waiting for pod pod-subpath-test-configmap-fjx4 to disappear - Jul 29 15:55:44.777: INFO: Pod pod-subpath-test-configmap-fjx4 no longer exists - STEP: Deleting pod pod-subpath-test-configmap-fjx4 07/29/23 15:55:44.777 - Jul 29 15:55:44.777: INFO: Deleting pod "pod-subpath-test-configmap-fjx4" in namespace "subpath-9644" - [AfterEach] [sig-storage] Subpath + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [BeforeEach] Update Demo + test/e2e/kubectl/kubectl.go:326 + [It] should create and stop a replication controller [Conformance] + test/e2e/kubectl/kubectl.go:339 + STEP: creating a replication controller 08/24/23 12:01:57.23 + Aug 24 12:01:57.231: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 create -f -' + Aug 24 12:01:57.639: INFO: stderr: "" + Aug 24 12:01:57.639: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" + STEP: waiting for all containers in name=update-demo pods to come up. 
08/24/23 12:01:57.639 + Aug 24 12:01:57.640: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' + Aug 24 12:01:57.823: INFO: stderr: "" + Aug 24 12:01:57.823: INFO: stdout: "update-demo-nautilus-wxkl7 update-demo-nautilus-z8qwl " + Aug 24 12:01:57.823: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-wxkl7 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:01:57.985: INFO: stderr: "" + Aug 24 12:01:57.985: INFO: stdout: "" + Aug 24 12:01:57.985: INFO: update-demo-nautilus-wxkl7 is created but not running + Aug 24 12:02:02.986: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' + Aug 24 12:02:03.159: INFO: stderr: "" + Aug 24 12:02:03.159: INFO: stdout: "update-demo-nautilus-wxkl7 update-demo-nautilus-z8qwl " + Aug 24 12:02:03.159: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-wxkl7 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:02:03.340: INFO: stderr: "" + Aug 24 12:02:03.340: INFO: stdout: "true" + Aug 24 12:02:03.341: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-wxkl7 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' + Aug 24 12:02:03.510: INFO: stderr: "" + Aug 24 12:02:03.510: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" + Aug 24 12:02:03.510: INFO: validating pod update-demo-nautilus-wxkl7 + Aug 24 12:02:03.524: INFO: got data: { + "image": "nautilus.jpg" + } + + Aug 24 12:02:03.524: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . + Aug 24 12:02:03.524: INFO: update-demo-nautilus-wxkl7 is verified up and running + Aug 24 12:02:03.525: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-z8qwl -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' + Aug 24 12:02:03.662: INFO: stderr: "" + Aug 24 12:02:03.662: INFO: stdout: "true" + Aug 24 12:02:03.663: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods update-demo-nautilus-z8qwl -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' + Aug 24 12:02:03.801: INFO: stderr: "" + Aug 24 12:02:03.801: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" + Aug 24 12:02:03.801: INFO: validating pod update-demo-nautilus-z8qwl + Aug 24 12:02:03.815: INFO: got data: { + "image": "nautilus.jpg" + } + + Aug 24 12:02:03.815: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . + Aug 24 12:02:03.815: INFO: update-demo-nautilus-z8qwl is verified up and running + STEP: using delete to clean up resources 08/24/23 12:02:03.815 + Aug 24 12:02:03.816: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 delete --grace-period=0 --force -f -' + Aug 24 12:02:04.000: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" + Aug 24 12:02:04.000: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" + Aug 24 12:02:04.000: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get rc,svc -l name=update-demo --no-headers' + Aug 24 12:02:04.230: INFO: stderr: "No resources found in kubectl-3825 namespace.\n" + Aug 24 12:02:04.230: INFO: stdout: "" + Aug 24 12:02:04.231: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3825 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' + Aug 24 12:02:04.419: INFO: stderr: "" + Aug 24 12:02:04.419: INFO: stdout: "" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 15:55:44.784: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Subpath + Aug 24 12:02:04.419: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "subpath-9644" for this suite. 07/29/23 15:55:44.792 + STEP: Destroying namespace "kubectl-3825" for this suite. 
08/24/23 12:02:04.432 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSS ------------------------------ -[sig-storage] Downward API volume - should provide container's memory request [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:235 -[BeforeEach] [sig-storage] Downward API volume +[sig-network] IngressClass API + should support creating IngressClass API operations [Conformance] + test/e2e/network/ingressclass.go:223 +[BeforeEach] [sig-network] IngressClass API + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:02:04.45 +Aug 24 12:02:04.450: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename ingressclass 08/24/23 12:02:04.454 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:04.484 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:04.491 +[BeforeEach] [sig-network] IngressClass API + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-network] IngressClass API + test/e2e/network/ingressclass.go:211 +[It] should support creating IngressClass API operations [Conformance] + test/e2e/network/ingressclass.go:223 +STEP: getting /apis 08/24/23 12:02:04.503 +STEP: getting /apis/networking.k8s.io 08/24/23 12:02:04.512 +STEP: getting /apis/networking.k8s.iov1 08/24/23 12:02:04.515 +STEP: creating 08/24/23 12:02:04.518 +STEP: getting 08/24/23 12:02:04.547 +STEP: listing 08/24/23 12:02:04.555 +STEP: watching 08/24/23 12:02:04.563 +Aug 24 12:02:04.563: INFO: starting watch +STEP: patching 08/24/23 12:02:04.565 +STEP: updating 08/24/23 12:02:04.575 +Aug 24 12:02:04.583: INFO: waiting for watch events with expected annotations +Aug 24 12:02:04.583: INFO: saw patched and updated annotations +STEP: deleting 08/24/23 12:02:04.583 +STEP: deleting a collection 08/24/23 12:02:04.602 +[AfterEach] [sig-network] IngressClass API + test/e2e/framework/node/init/init.go:32 +Aug 24 12:02:04.632: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] IngressClass API + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-network] IngressClass API + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-network] IngressClass API + tear down framework | framework.go:193 +STEP: Destroying namespace "ingressclass-8680" for this suite. 
08/24/23 12:02:04.644 +------------------------------ +• [0.208 seconds] +[sig-network] IngressClass API +test/e2e/network/common/framework.go:23 + should support creating IngressClass API operations [Conformance] + test/e2e/network/ingressclass.go:223 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-network] IngressClass API + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:02:04.45 + Aug 24 12:02:04.450: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename ingressclass 08/24/23 12:02:04.454 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:04.484 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:04.491 + [BeforeEach] [sig-network] IngressClass API + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] [sig-network] IngressClass API + test/e2e/network/ingressclass.go:211 + [It] should support creating IngressClass API operations [Conformance] + test/e2e/network/ingressclass.go:223 + STEP: getting /apis 08/24/23 12:02:04.503 + STEP: getting /apis/networking.k8s.io 08/24/23 12:02:04.512 + STEP: getting /apis/networking.k8s.iov1 08/24/23 12:02:04.515 + STEP: creating 08/24/23 12:02:04.518 + STEP: getting 08/24/23 12:02:04.547 + STEP: listing 08/24/23 12:02:04.555 + STEP: watching 08/24/23 12:02:04.563 + Aug 24 12:02:04.563: INFO: starting watch + STEP: patching 08/24/23 12:02:04.565 + STEP: updating 08/24/23 12:02:04.575 + Aug 24 12:02:04.583: INFO: waiting for watch events with expected annotations + Aug 24 12:02:04.583: INFO: saw patched and updated annotations + STEP: deleting 08/24/23 12:02:04.583 + STEP: deleting a collection 08/24/23 12:02:04.602 + [AfterEach] [sig-network] IngressClass API + test/e2e/framework/node/init/init.go:32 + Aug 24 12:02:04.632: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] IngressClass API + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-network] IngressClass API + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-network] IngressClass API + tear down framework | framework.go:193 + STEP: Destroying namespace "ingressclass-8680" for this suite. 
08/24/23 12:02:04.644 + << End Captured GinkgoWriter Output +------------------------------ +SSSS +------------------------------ +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should list, patch and delete a collection of StatefulSets [Conformance] + test/e2e/apps/statefulset.go:908 +[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:55:44.809 -Jul 29 15:55:44.809: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 15:55:44.811 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:44.863 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:44.872 -[BeforeEach] [sig-storage] Downward API volume +STEP: Creating a kubernetes client 08/24/23 12:02:04.658 +Aug 24 12:02:04.658: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename statefulset 08/24/23 12:02:04.66 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:04.692 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:04.698 +[BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 -[It] should provide container's memory request [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:235 -STEP: Creating a pod to test downward API volume plugin 07/29/23 15:55:44.88 -Jul 29 15:55:44.904: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6" in namespace "downward-api-1111" to be "Succeeded or Failed" -Jul 29 15:55:44.920: INFO: Pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6": Phase="Pending", Reason="", readiness=false. Elapsed: 15.536798ms -Jul 29 15:55:46.926: INFO: Pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021681811s -Jul 29 15:55:48.930: INFO: Pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.025913133s -STEP: Saw pod success 07/29/23 15:55:48.93 -Jul 29 15:55:48.930: INFO: Pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6" satisfied condition "Succeeded or Failed" -Jul 29 15:55:48.939: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6 container client-container: -STEP: delete the pod 07/29/23 15:55:48.952 -Jul 29 15:55:48.983: INFO: Waiting for pod downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6 to disappear -Jul 29 15:55:48.991: INFO: Pod downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6 no longer exists -[AfterEach] [sig-storage] Downward API volume +[BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 +STEP: Creating service test in namespace statefulset-7243 08/24/23 12:02:04.707 +[It] should list, patch and delete a collection of StatefulSets [Conformance] + test/e2e/apps/statefulset.go:908 +Aug 24 12:02:04.769: INFO: Found 0 stateful pods, waiting for 1 +Aug 24 12:02:14.779: INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: patching the StatefulSet 08/24/23 12:02:14.79 +W0824 12:02:14.814464 14 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" +Aug 24 12:02:14.828: INFO: Found 1 stateful pods, waiting for 2 +Aug 24 12:02:24.838: INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 12:02:24.838: INFO: Waiting for pod test-ss-1 to enter Running - Ready=true, currently Running - Ready=true +STEP: Listing all StatefulSets 08/24/23 12:02:24.852 +STEP: Delete all of the StatefulSets 08/24/23 12:02:24.858 +STEP: Verify that StatefulSets have been deleted 08/24/23 12:02:24.878 +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 +Aug 24 12:02:24.888: INFO: Deleting all statefulset in ns statefulset-7243 +[AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 -Jul 29 15:55:48.991: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume +Aug 24 12:02:24.905: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-1111" for this suite. 07/29/23 15:55:49 +STEP: Destroying namespace "statefulset-7243" for this suite. 
08/24/23 12:02:24.914 ------------------------------ -• [4.207 seconds] -[sig-storage] Downward API volume -test/e2e/common/storage/framework.go:23 - should provide container's memory request [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:235 +• [SLOW TEST] [20.270 seconds] +[sig-apps] StatefulSet +test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:103 + should list, patch and delete a collection of StatefulSets [Conformance] + test/e2e/apps/statefulset.go:908 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume + [BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:55:44.809 - Jul 29 15:55:44.809: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 15:55:44.811 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:44.863 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:44.872 - [BeforeEach] [sig-storage] Downward API volume + STEP: Creating a kubernetes client 08/24/23 12:02:04.658 + Aug 24 12:02:04.658: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename statefulset 08/24/23 12:02:04.66 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:04.692 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:04.698 + [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 - [It] should provide container's memory request [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:235 - STEP: Creating a pod to test downward API volume plugin 07/29/23 15:55:44.88 - Jul 29 15:55:44.904: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6" in namespace "downward-api-1111" to be "Succeeded or Failed" - Jul 29 15:55:44.920: INFO: Pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6": Phase="Pending", Reason="", readiness=false. Elapsed: 15.536798ms - Jul 29 15:55:46.926: INFO: Pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021681811s - Jul 29 15:55:48.930: INFO: Pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.025913133s - STEP: Saw pod success 07/29/23 15:55:48.93 - Jul 29 15:55:48.930: INFO: Pod "downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6" satisfied condition "Succeeded or Failed" - Jul 29 15:55:48.939: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6 container client-container: - STEP: delete the pod 07/29/23 15:55:48.952 - Jul 29 15:55:48.983: INFO: Waiting for pod downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6 to disappear - Jul 29 15:55:48.991: INFO: Pod downwardapi-volume-e7bd06bf-0df1-42c8-9c8b-e3702c974bc6 no longer exists - [AfterEach] [sig-storage] Downward API volume + [BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 + [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 + STEP: Creating service test in namespace statefulset-7243 08/24/23 12:02:04.707 + [It] should list, patch and delete a collection of StatefulSets [Conformance] + test/e2e/apps/statefulset.go:908 + Aug 24 12:02:04.769: INFO: Found 0 stateful pods, waiting for 1 + Aug 24 12:02:14.779: INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true + STEP: patching the StatefulSet 08/24/23 12:02:14.79 + W0824 12:02:14.814464 14 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" + Aug 24 12:02:14.828: INFO: Found 1 stateful pods, waiting for 2 + Aug 24 12:02:24.838: INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 12:02:24.838: INFO: Waiting for pod test-ss-1 to enter Running - Ready=true, currently Running - Ready=true + STEP: Listing all StatefulSets 08/24/23 12:02:24.852 + STEP: Delete all of the StatefulSets 08/24/23 12:02:24.858 + STEP: Verify that StatefulSets have been deleted 08/24/23 12:02:24.878 + [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 + Aug 24 12:02:24.888: INFO: Deleting all statefulset in ns statefulset-7243 + [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 - Jul 29 15:55:48.991: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume + Aug 24 12:02:24.905: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-1111" for this suite. 07/29/23 15:55:49 + STEP: Destroying namespace "statefulset-7243" for this suite. 
08/24/23 12:02:24.914 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSS ------------------------------ -[sig-storage] Downward API volume - should update annotations on modification [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:162 -[BeforeEach] [sig-storage] Downward API volume +[sig-node] Variable Expansion + should allow substituting values in a container's command [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:73 +[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:55:49.021 -Jul 29 15:55:49.021: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 15:55:49.023 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:49.055 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:49.059 -[BeforeEach] [sig-storage] Downward API volume +STEP: Creating a kubernetes client 08/24/23 12:02:24.929 +Aug 24 12:02:24.929: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename var-expansion 08/24/23 12:02:24.932 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:25.043 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:25.05 +[BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 -[It] should update annotations on modification [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:162 -STEP: Creating the pod 07/29/23 15:55:49.064 -Jul 29 15:55:49.080: INFO: Waiting up to 5m0s for pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7" in namespace "downward-api-6201" to be "running and ready" -Jul 29 15:55:49.086: INFO: Pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7": Phase="Pending", Reason="", readiness=false. Elapsed: 6.255751ms -Jul 29 15:55:49.086: INFO: The phase of Pod annotationupdate02899164-573b-4453-97e9-3a5181c065d7 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:55:51.095: INFO: Pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7": Phase="Running", Reason="", readiness=true. Elapsed: 2.01556965s -Jul 29 15:55:51.096: INFO: The phase of Pod annotationupdate02899164-573b-4453-97e9-3a5181c065d7 is Running (Ready = true) -Jul 29 15:55:51.096: INFO: Pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7" satisfied condition "running and ready" -Jul 29 15:55:51.641: INFO: Successfully updated pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7" -[AfterEach] [sig-storage] Downward API volume +[It] should allow substituting values in a container's command [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:73 +STEP: Creating a pod to test substitution in container's command 08/24/23 12:02:25.054 +Aug 24 12:02:25.089: INFO: Waiting up to 5m0s for pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f" in namespace "var-expansion-3254" to be "Succeeded or Failed" +Aug 24 12:02:25.103: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f": Phase="Pending", Reason="", readiness=false. 
Elapsed: 13.29147ms +Aug 24 12:02:27.112: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022193154s +Aug 24 12:02:29.110: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f": Phase="Pending", Reason="", readiness=false. Elapsed: 4.020125314s +Aug 24 12:02:31.113: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.022988239s +STEP: Saw pod success 08/24/23 12:02:31.113 +Aug 24 12:02:31.113: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f" satisfied condition "Succeeded or Failed" +Aug 24 12:02:31.119: INFO: Trying to get logs from node pe9deep4seen-3 pod var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f container dapi-container: +STEP: delete the pod 08/24/23 12:02:31.133 +Aug 24 12:02:31.157: INFO: Waiting for pod var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f to disappear +Aug 24 12:02:31.162: INFO: Pod var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f no longer exists +[AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 -Jul 29 15:55:55.732: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume +Aug 24 12:02:31.163: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-6201" for this suite. 07/29/23 15:55:55.744 +STEP: Destroying namespace "var-expansion-3254" for this suite. 
08/24/23 12:02:31.172 ------------------------------ -• [SLOW TEST] [6.736 seconds] -[sig-storage] Downward API volume -test/e2e/common/storage/framework.go:23 - should update annotations on modification [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:162 +• [SLOW TEST] [6.256 seconds] +[sig-node] Variable Expansion +test/e2e/common/node/framework.go:23 + should allow substituting values in a container's command [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:73 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume + [BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:55:49.021 - Jul 29 15:55:49.021: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 15:55:49.023 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:49.055 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:49.059 - [BeforeEach] [sig-storage] Downward API volume + STEP: Creating a kubernetes client 08/24/23 12:02:24.929 + Aug 24 12:02:24.929: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename var-expansion 08/24/23 12:02:24.932 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:25.043 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:25.05 + [BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 - [It] should update annotations on modification [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:162 - STEP: Creating the pod 07/29/23 15:55:49.064 - Jul 29 15:55:49.080: INFO: Waiting up to 5m0s for pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7" in namespace "downward-api-6201" to be "running and ready" - Jul 29 15:55:49.086: INFO: Pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7": Phase="Pending", Reason="", readiness=false. Elapsed: 6.255751ms - Jul 29 15:55:49.086: INFO: The phase of Pod annotationupdate02899164-573b-4453-97e9-3a5181c065d7 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:55:51.095: INFO: Pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7": Phase="Running", Reason="", readiness=true. Elapsed: 2.01556965s - Jul 29 15:55:51.096: INFO: The phase of Pod annotationupdate02899164-573b-4453-97e9-3a5181c065d7 is Running (Ready = true) - Jul 29 15:55:51.096: INFO: Pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7" satisfied condition "running and ready" - Jul 29 15:55:51.641: INFO: Successfully updated pod "annotationupdate02899164-573b-4453-97e9-3a5181c065d7" - [AfterEach] [sig-storage] Downward API volume + [It] should allow substituting values in a container's command [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:73 + STEP: Creating a pod to test substitution in container's command 08/24/23 12:02:25.054 + Aug 24 12:02:25.089: INFO: Waiting up to 5m0s for pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f" in namespace "var-expansion-3254" to be "Succeeded or Failed" + Aug 24 12:02:25.103: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f": Phase="Pending", Reason="", readiness=false. 
Elapsed: 13.29147ms + Aug 24 12:02:27.112: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022193154s + Aug 24 12:02:29.110: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f": Phase="Pending", Reason="", readiness=false. Elapsed: 4.020125314s + Aug 24 12:02:31.113: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.022988239s + STEP: Saw pod success 08/24/23 12:02:31.113 + Aug 24 12:02:31.113: INFO: Pod "var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f" satisfied condition "Succeeded or Failed" + Aug 24 12:02:31.119: INFO: Trying to get logs from node pe9deep4seen-3 pod var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f container dapi-container: + STEP: delete the pod 08/24/23 12:02:31.133 + Aug 24 12:02:31.157: INFO: Waiting for pod var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f to disappear + Aug 24 12:02:31.162: INFO: Pod var-expansion-e5d0acf8-6b08-4cb6-b7df-21c005351c2f no longer exists + [AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 - Jul 29 15:55:55.732: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume + Aug 24 12:02:31.163: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-6201" for this suite. 07/29/23 15:55:55.744 + STEP: Destroying namespace "var-expansion-3254" for this suite. 
08/24/23 12:02:31.172 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS ------------------------------- -[sig-auth] ServiceAccounts - should mount projected service account token [Conformance] - test/e2e/auth/service_accounts.go:275 -[BeforeEach] [sig-auth] ServiceAccounts +[sig-cli] Kubectl client Kubectl diff + should check if kubectl diff finds a difference for Deployments [Conformance] + test/e2e/kubectl/kubectl.go:931 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:55:55.759 -Jul 29 15:55:55.759: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename svcaccounts 07/29/23 15:55:55.764 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:55.799 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:55.807 -[BeforeEach] [sig-auth] ServiceAccounts +STEP: Creating a kubernetes client 08/24/23 12:02:31.186 +Aug 24 12:02:31.186: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:02:31.188 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:31.223 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:31.234 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[It] should mount projected service account token [Conformance] - test/e2e/auth/service_accounts.go:275 -STEP: Creating a pod to test service account token: 07/29/23 15:55:55.812 -Jul 29 15:55:55.831: INFO: Waiting up to 5m0s for pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8" in namespace "svcaccounts-3769" to be "Succeeded or Failed" -Jul 29 15:55:55.839: INFO: Pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8": Phase="Pending", Reason="", readiness=false. Elapsed: 8.534353ms -Jul 29 15:55:57.850: INFO: Pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019570843s -Jul 29 15:55:59.851: INFO: Pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.019745802s -STEP: Saw pod success 07/29/23 15:55:59.851 -Jul 29 15:55:59.852: INFO: Pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8" satisfied condition "Succeeded or Failed" -Jul 29 15:55:59.858: INFO: Trying to get logs from node wetuj3nuajog-3 pod test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8 container agnhost-container: -STEP: delete the pod 07/29/23 15:55:59.875 -Jul 29 15:55:59.945: INFO: Waiting for pod test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8 to disappear -Jul 29 15:55:59.962: INFO: Pod test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8 no longer exists -[AfterEach] [sig-auth] ServiceAccounts +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[It] should check if kubectl diff finds a difference for Deployments [Conformance] + test/e2e/kubectl/kubectl.go:931 +STEP: create deployment with httpd image 08/24/23 12:02:31.24 +Aug 24 12:02:31.241: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3619 create -f -' +Aug 24 12:02:31.748: INFO: stderr: "" +Aug 24 12:02:31.749: INFO: stdout: "deployment.apps/httpd-deployment created\n" +STEP: verify diff finds difference between live and declared image 08/24/23 12:02:31.749 +Aug 24 12:02:31.750: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3619 diff -f -' +Aug 24 12:02:32.284: INFO: rc: 1 +Aug 24 12:02:32.284: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3619 delete -f -' +Aug 24 12:02:32.536: INFO: stderr: "" +Aug 24 12:02:32.537: INFO: stdout: "deployment.apps \"httpd-deployment\" deleted\n" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 15:55:59.963: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +Aug 24 12:02:32.537: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "svcaccounts-3769" for this suite. 07/29/23 15:55:59.976 +STEP: Destroying namespace "kubectl-3619" for this suite. 
08/24/23 12:02:32.547 ------------------------------ -• [4.230 seconds] -[sig-auth] ServiceAccounts -test/e2e/auth/framework.go:23 - should mount projected service account token [Conformance] - test/e2e/auth/service_accounts.go:275 +• [1.372 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Kubectl diff + test/e2e/kubectl/kubectl.go:925 + should check if kubectl diff finds a difference for Deployments [Conformance] + test/e2e/kubectl/kubectl.go:931 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-auth] ServiceAccounts + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:55:55.759 - Jul 29 15:55:55.759: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename svcaccounts 07/29/23 15:55:55.764 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:55:55.799 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:55:55.807 - [BeforeEach] [sig-auth] ServiceAccounts + STEP: Creating a kubernetes client 08/24/23 12:02:31.186 + Aug 24 12:02:31.186: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:02:31.188 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:31.223 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:31.234 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [It] should mount projected service account token [Conformance] - test/e2e/auth/service_accounts.go:275 - STEP: Creating a pod to test service account token: 07/29/23 15:55:55.812 - Jul 29 15:55:55.831: INFO: Waiting up to 5m0s for pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8" in namespace "svcaccounts-3769" to be "Succeeded or Failed" - Jul 29 15:55:55.839: INFO: Pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8": Phase="Pending", Reason="", readiness=false. Elapsed: 8.534353ms - Jul 29 15:55:57.850: INFO: Pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019570843s - Jul 29 15:55:59.851: INFO: Pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.019745802s - STEP: Saw pod success 07/29/23 15:55:59.851 - Jul 29 15:55:59.852: INFO: Pod "test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8" satisfied condition "Succeeded or Failed" - Jul 29 15:55:59.858: INFO: Trying to get logs from node wetuj3nuajog-3 pod test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8 container agnhost-container: - STEP: delete the pod 07/29/23 15:55:59.875 - Jul 29 15:55:59.945: INFO: Waiting for pod test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8 to disappear - Jul 29 15:55:59.962: INFO: Pod test-pod-232e4ebf-9148-435e-bd82-91aae729a8d8 no longer exists - [AfterEach] [sig-auth] ServiceAccounts + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [It] should check if kubectl diff finds a difference for Deployments [Conformance] + test/e2e/kubectl/kubectl.go:931 + STEP: create deployment with httpd image 08/24/23 12:02:31.24 + Aug 24 12:02:31.241: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3619 create -f -' + Aug 24 12:02:31.748: INFO: stderr: "" + Aug 24 12:02:31.749: INFO: stdout: "deployment.apps/httpd-deployment created\n" + STEP: verify diff finds difference between live and declared image 08/24/23 12:02:31.749 + Aug 24 12:02:31.750: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3619 diff -f -' + Aug 24 12:02:32.284: INFO: rc: 1 + Aug 24 12:02:32.284: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-3619 delete -f -' + Aug 24 12:02:32.536: INFO: stderr: "" + Aug 24 12:02:32.537: INFO: stdout: "deployment.apps \"httpd-deployment\" deleted\n" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 15:55:59.963: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + Aug 24 12:02:32.537: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "svcaccounts-3769" for this suite. 07/29/23 15:55:59.976 + STEP: Destroying namespace "kubectl-3619" for this suite. 08/24/23 12:02:32.547 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSSSS ------------------------------ -[sig-node] PodTemplates - should replace a pod template [Conformance] - test/e2e/common/node/podtemplates.go:176 -[BeforeEach] [sig-node] PodTemplates +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and ensure its status is promptly calculated. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:75 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:55:59.99 -Jul 29 15:55:59.990: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename podtemplate 07/29/23 15:55:59.995 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:00.032 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:00.039 -[BeforeEach] [sig-node] PodTemplates +STEP: Creating a kubernetes client 08/24/23 12:02:32.558 +Aug 24 12:02:32.559: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:02:32.561 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:32.596 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:32.601 +[BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[It] should replace a pod template [Conformance] - test/e2e/common/node/podtemplates.go:176 -STEP: Create a pod template 07/29/23 15:56:00.048 -STEP: Replace a pod template 07/29/23 15:56:00.063 -Jul 29 15:56:00.096: INFO: Found updated podtemplate annotation: "true" - -[AfterEach] [sig-node] PodTemplates +[It] should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] + test/e2e/apimachinery/resource_quota.go:75 +STEP: Counting existing ResourceQuota 08/24/23 12:02:32.609 +STEP: Creating a ResourceQuota 08/24/23 12:02:37.619 +STEP: Ensuring resource quota status is calculated 08/24/23 12:02:37.629 +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 15:56:00.097: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] PodTemplates +Aug 24 12:02:39.638: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] PodTemplates +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] PodTemplates +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "podtemplate-1624" for this suite. 07/29/23 15:56:00.131 +STEP: Destroying namespace "resourcequota-1450" for this suite. 08/24/23 12:02:39.647 ------------------------------ -• [0.165 seconds] -[sig-node] PodTemplates -test/e2e/common/node/framework.go:23 - should replace a pod template [Conformance] - test/e2e/common/node/podtemplates.go:176 +• [SLOW TEST] [7.102 seconds] +[sig-api-machinery] ResourceQuota +test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and ensure its status is promptly calculated. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:75 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] PodTemplates + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:55:59.99 - Jul 29 15:55:59.990: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename podtemplate 07/29/23 15:55:59.995 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:00.032 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:00.039 - [BeforeEach] [sig-node] PodTemplates + STEP: Creating a kubernetes client 08/24/23 12:02:32.558 + Aug 24 12:02:32.559: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:02:32.561 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:32.596 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:32.601 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [It] should replace a pod template [Conformance] - test/e2e/common/node/podtemplates.go:176 - STEP: Create a pod template 07/29/23 15:56:00.048 - STEP: Replace a pod template 07/29/23 15:56:00.063 - Jul 29 15:56:00.096: INFO: Found updated podtemplate annotation: "true" - - [AfterEach] [sig-node] PodTemplates + [It] should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] + test/e2e/apimachinery/resource_quota.go:75 + STEP: Counting existing ResourceQuota 08/24/23 12:02:32.609 + STEP: Creating a ResourceQuota 08/24/23 12:02:37.619 + STEP: Ensuring resource quota status is calculated 08/24/23 12:02:37.629 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 15:56:00.097: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] PodTemplates + Aug 24 12:02:39.638: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] PodTemplates + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] PodTemplates + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "podtemplate-1624" for this suite. 07/29/23 15:56:00.131 + STEP: Destroying namespace "resourcequota-1450" for this suite. 
08/24/23 12:02:39.647 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:89 -[BeforeEach] [sig-storage] Projected configMap +[sig-storage] EmptyDir wrapper volumes + should not cause race condition when used for configmaps [Serial] [Conformance] + test/e2e/storage/empty_dir_wrapper.go:189 +[BeforeEach] [sig-storage] EmptyDir wrapper volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:56:00.157 -Jul 29 15:56:00.157: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 15:56:00.166 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:00.216 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:00.222 -[BeforeEach] [sig-storage] Projected configMap +STEP: Creating a kubernetes client 08/24/23 12:02:39.665 +Aug 24 12:02:39.665: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir-wrapper 08/24/23 12:02:39.667 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:39.695 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:39.702 +[BeforeEach] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:89 -STEP: Creating configMap with name projected-configmap-test-volume-map-9e77d05a-5ad2-45eb-93c0-3c7c897a2222 07/29/23 15:56:00.23 -STEP: Creating a pod to test consume configMaps 07/29/23 15:56:00.24 -Jul 29 15:56:00.261: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76" in namespace "projected-9631" to be "Succeeded or Failed" -Jul 29 15:56:00.270: INFO: Pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76": Phase="Pending", Reason="", readiness=false. Elapsed: 8.84384ms -Jul 29 15:56:02.281: INFO: Pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019451878s -Jul 29 15:56:04.279: INFO: Pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017436525s -STEP: Saw pod success 07/29/23 15:56:04.28 -Jul 29 15:56:04.282: INFO: Pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76" satisfied condition "Succeeded or Failed" -Jul 29 15:56:04.287: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76 container agnhost-container: -STEP: delete the pod 07/29/23 15:56:04.313 -Jul 29 15:56:04.348: INFO: Waiting for pod pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76 to disappear -Jul 29 15:56:04.353: INFO: Pod pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76 no longer exists -[AfterEach] [sig-storage] Projected configMap +[It] should not cause race condition when used for configmaps [Serial] [Conformance] + test/e2e/storage/empty_dir_wrapper.go:189 +STEP: Creating 50 configmaps 08/24/23 12:02:39.708 +STEP: Creating RC which spawns configmap-volume pods 08/24/23 12:02:40.086 +Aug 24 12:02:40.110: INFO: Pod name wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216: Found 0 pods out of 5 +Aug 24 12:02:45.128: INFO: Pod name wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216: Found 5 pods out of 5 +STEP: Ensuring each pod is running 08/24/23 12:02:45.128 +Aug 24 12:02:45.128: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:02:45.136: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 8.359166ms +Aug 24 12:02:47.152: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023857548s +Aug 24 12:02:49.151: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 4.022990404s +Aug 24 12:02:51.149: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 6.020813805s +Aug 24 12:02:53.146: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 8.017778866s +Aug 24 12:02:55.180: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 10.051730301s +Aug 24 12:02:57.149: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Running", Reason="", readiness=true. Elapsed: 12.021524891s +Aug 24 12:02:57.150: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k" satisfied condition "running" +Aug 24 12:02:57.150: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-g784n" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:02:57.160: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-g784n": Phase="Running", Reason="", readiness=true. Elapsed: 10.026625ms +Aug 24 12:02:57.160: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-g784n" satisfied condition "running" +Aug 24 12:02:57.160: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-hw98m" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:02:57.171: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-hw98m": Phase="Running", Reason="", readiness=true. 
Elapsed: 10.581572ms +Aug 24 12:02:57.171: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-hw98m" satisfied condition "running" +Aug 24 12:02:57.171: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-k5cmn" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:02:57.180: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-k5cmn": Phase="Running", Reason="", readiness=true. Elapsed: 8.173669ms +Aug 24 12:02:57.180: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-k5cmn" satisfied condition "running" +Aug 24 12:02:57.180: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-kc8hz" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:02:57.187: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-kc8hz": Phase="Running", Reason="", readiness=true. Elapsed: 6.772071ms +Aug 24 12:02:57.187: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-kc8hz" satisfied condition "running" +STEP: deleting ReplicationController wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216 in namespace emptydir-wrapper-3153, will wait for the garbage collector to delete the pods 08/24/23 12:02:57.187 +Aug 24 12:02:57.266: INFO: Deleting ReplicationController wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216 took: 17.273278ms +Aug 24 12:02:57.367: INFO: Terminating ReplicationController wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216 pods took: 100.484286ms +STEP: Creating RC which spawns configmap-volume pods 08/24/23 12:03:00.181 +Aug 24 12:03:00.208: INFO: Pod name wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827: Found 0 pods out of 5 +Aug 24 12:03:05.232: INFO: Pod name wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827: Found 5 pods out of 5 +STEP: Ensuring each pod is running 08/24/23 12:03:05.232 +Aug 24 12:03:05.233: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:05.245: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. Elapsed: 12.202784ms +Aug 24 12:03:07.253: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020635752s +Aug 24 12:03:09.257: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. Elapsed: 4.024338191s +Aug 24 12:03:11.256: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. Elapsed: 6.023106052s +Aug 24 12:03:13.474: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. Elapsed: 8.241409927s +Aug 24 12:03:15.256: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Running", Reason="", readiness=true. Elapsed: 10.023400714s +Aug 24 12:03:15.256: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq" satisfied condition "running" +Aug 24 12:03:15.256: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-n4fpx" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:15.264: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-n4fpx": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.508693ms +Aug 24 12:03:15.264: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-n4fpx" satisfied condition "running" +Aug 24 12:03:15.264: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-njbh5" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:15.271: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-njbh5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.624897ms +Aug 24 12:03:17.283: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-njbh5": Phase="Running", Reason="", readiness=true. Elapsed: 2.01886981s +Aug 24 12:03:17.283: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-njbh5" satisfied condition "running" +Aug 24 12:03:17.284: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-w2z9k" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:17.295: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-w2z9k": Phase="Running", Reason="", readiness=true. Elapsed: 11.081999ms +Aug 24 12:03:17.295: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-w2z9k" satisfied condition "running" +Aug 24 12:03:17.295: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-wskkg" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:17.304: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-wskkg": Phase="Running", Reason="", readiness=true. Elapsed: 8.656357ms +Aug 24 12:03:17.304: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-wskkg" satisfied condition "running" +STEP: deleting ReplicationController wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827 in namespace emptydir-wrapper-3153, will wait for the garbage collector to delete the pods 08/24/23 12:03:17.305 +Aug 24 12:03:17.388: INFO: Deleting ReplicationController wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827 took: 20.324822ms +Aug 24 12:03:17.588: INFO: Terminating ReplicationController wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827 pods took: 200.369357ms +STEP: Creating RC which spawns configmap-volume pods 08/24/23 12:03:20.301 +Aug 24 12:03:20.338: INFO: Pod name wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359: Found 0 pods out of 5 +Aug 24 12:03:25.356: INFO: Pod name wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359: Found 5 pods out of 5 +STEP: Ensuring each pod is running 08/24/23 12:03:25.356 +Aug 24 12:03:25.357: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:25.366: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 9.292609ms +Aug 24 12:03:27.387: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 2.030901421s +Aug 24 12:03:29.375: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018589702s +Aug 24 12:03:31.381: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 6.02487661s +Aug 24 12:03:33.375: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.017944368s +Aug 24 12:03:35.395: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 10.038159108s +Aug 24 12:03:37.375: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Running", Reason="", readiness=true. Elapsed: 12.018574073s +Aug 24 12:03:37.375: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks" satisfied condition "running" +Aug 24 12:03:37.375: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-blr4x" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:37.383: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-blr4x": Phase="Running", Reason="", readiness=true. Elapsed: 7.809901ms +Aug 24 12:03:37.383: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-blr4x" satisfied condition "running" +Aug 24 12:03:37.383: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-h7f2t" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:37.392: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-h7f2t": Phase="Running", Reason="", readiness=true. Elapsed: 8.678785ms +Aug 24 12:03:37.392: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-h7f2t" satisfied condition "running" +Aug 24 12:03:37.392: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-p7kwt" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:37.400: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-p7kwt": Phase="Running", Reason="", readiness=true. Elapsed: 7.719113ms +Aug 24 12:03:37.400: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-p7kwt" satisfied condition "running" +Aug 24 12:03:37.400: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-tbfm4" in namespace "emptydir-wrapper-3153" to be "running" +Aug 24 12:03:37.408: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-tbfm4": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.546671ms +Aug 24 12:03:37.408: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-tbfm4" satisfied condition "running" +STEP: deleting ReplicationController wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359 in namespace emptydir-wrapper-3153, will wait for the garbage collector to delete the pods 08/24/23 12:03:37.408 +Aug 24 12:03:37.479: INFO: Deleting ReplicationController wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359 took: 12.279694ms +Aug 24 12:03:37.679: INFO: Terminating ReplicationController wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359 pods took: 200.810427ms +STEP: Cleaning up the configMaps 08/24/23 12:03:40.98 +[AfterEach] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/node/init/init.go:32 -Jul 29 15:56:04.353: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected configMap +Aug 24 12:03:41.541: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes tear down framework | framework.go:193 -STEP: Destroying namespace "projected-9631" for this suite. 07/29/23 15:56:04.363 +STEP: Destroying namespace "emptydir-wrapper-3153" for this suite. 08/24/23 12:03:41.551 ------------------------------ -• [4.219 seconds] -[sig-storage] Projected configMap -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:89 +• [SLOW TEST] [61.901 seconds] +[sig-storage] EmptyDir wrapper volumes +test/e2e/storage/utils/framework.go:23 + should not cause race condition when used for configmaps [Serial] [Conformance] + test/e2e/storage/empty_dir_wrapper.go:189 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected configMap + [BeforeEach] [sig-storage] EmptyDir wrapper volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:56:00.157 - Jul 29 15:56:00.157: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 15:56:00.166 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:00.216 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:00.222 - [BeforeEach] [sig-storage] Projected configMap - test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:89 - STEP: Creating configMap with name projected-configmap-test-volume-map-9e77d05a-5ad2-45eb-93c0-3c7c897a2222 07/29/23 15:56:00.23 - STEP: Creating a pod to test consume configMaps 07/29/23 15:56:00.24 - Jul 29 15:56:00.261: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76" in namespace "projected-9631" to be "Succeeded or Failed" - Jul 29 15:56:00.270: INFO: Pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.84384ms - Jul 29 15:56:02.281: INFO: Pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019451878s - Jul 29 15:56:04.279: INFO: Pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017436525s - STEP: Saw pod success 07/29/23 15:56:04.28 - Jul 29 15:56:04.282: INFO: Pod "pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76" satisfied condition "Succeeded or Failed" - Jul 29 15:56:04.287: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76 container agnhost-container: - STEP: delete the pod 07/29/23 15:56:04.313 - Jul 29 15:56:04.348: INFO: Waiting for pod pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76 to disappear - Jul 29 15:56:04.353: INFO: Pod pod-projected-configmaps-e64fd63f-e20b-454c-9cd7-1ed8afa37b76 no longer exists - [AfterEach] [sig-storage] Projected configMap + STEP: Creating a kubernetes client 08/24/23 12:02:39.665 + Aug 24 12:02:39.665: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir-wrapper 08/24/23 12:02:39.667 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:02:39.695 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:02:39.702 + [BeforeEach] [sig-storage] EmptyDir wrapper volumes + test/e2e/framework/metrics/init/init.go:31 + [It] should not cause race condition when used for configmaps [Serial] [Conformance] + test/e2e/storage/empty_dir_wrapper.go:189 + STEP: Creating 50 configmaps 08/24/23 12:02:39.708 + STEP: Creating RC which spawns configmap-volume pods 08/24/23 12:02:40.086 + Aug 24 12:02:40.110: INFO: Pod name wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216: Found 0 pods out of 5 + Aug 24 12:02:45.128: INFO: Pod name wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216: Found 5 pods out of 5 + STEP: Ensuring each pod is running 08/24/23 12:02:45.128 + Aug 24 12:02:45.128: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:02:45.136: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 8.359166ms + Aug 24 12:02:47.152: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023857548s + Aug 24 12:02:49.151: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 4.022990404s + Aug 24 12:02:51.149: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 6.020813805s + Aug 24 12:02:53.146: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 8.017778866s + Aug 24 12:02:55.180: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Pending", Reason="", readiness=false. Elapsed: 10.051730301s + Aug 24 12:02:57.149: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k": Phase="Running", Reason="", readiness=true. 
Elapsed: 12.021524891s + Aug 24 12:02:57.150: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-8dt4k" satisfied condition "running" + Aug 24 12:02:57.150: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-g784n" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:02:57.160: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-g784n": Phase="Running", Reason="", readiness=true. Elapsed: 10.026625ms + Aug 24 12:02:57.160: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-g784n" satisfied condition "running" + Aug 24 12:02:57.160: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-hw98m" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:02:57.171: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-hw98m": Phase="Running", Reason="", readiness=true. Elapsed: 10.581572ms + Aug 24 12:02:57.171: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-hw98m" satisfied condition "running" + Aug 24 12:02:57.171: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-k5cmn" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:02:57.180: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-k5cmn": Phase="Running", Reason="", readiness=true. Elapsed: 8.173669ms + Aug 24 12:02:57.180: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-k5cmn" satisfied condition "running" + Aug 24 12:02:57.180: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-kc8hz" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:02:57.187: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-kc8hz": Phase="Running", Reason="", readiness=true. Elapsed: 6.772071ms + Aug 24 12:02:57.187: INFO: Pod "wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216-kc8hz" satisfied condition "running" + STEP: deleting ReplicationController wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216 in namespace emptydir-wrapper-3153, will wait for the garbage collector to delete the pods 08/24/23 12:02:57.187 + Aug 24 12:02:57.266: INFO: Deleting ReplicationController wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216 took: 17.273278ms + Aug 24 12:02:57.367: INFO: Terminating ReplicationController wrapped-volume-race-885a3d96-9376-4fc3-b626-5bc2293a5216 pods took: 100.484286ms + STEP: Creating RC which spawns configmap-volume pods 08/24/23 12:03:00.181 + Aug 24 12:03:00.208: INFO: Pod name wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827: Found 0 pods out of 5 + Aug 24 12:03:05.232: INFO: Pod name wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827: Found 5 pods out of 5 + STEP: Ensuring each pod is running 08/24/23 12:03:05.232 + Aug 24 12:03:05.233: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:05.245: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. Elapsed: 12.202784ms + Aug 24 12:03:07.253: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020635752s + Aug 24 12:03:09.257: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4.024338191s + Aug 24 12:03:11.256: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. Elapsed: 6.023106052s + Aug 24 12:03:13.474: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Pending", Reason="", readiness=false. Elapsed: 8.241409927s + Aug 24 12:03:15.256: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq": Phase="Running", Reason="", readiness=true. Elapsed: 10.023400714s + Aug 24 12:03:15.256: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-cbwqq" satisfied condition "running" + Aug 24 12:03:15.256: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-n4fpx" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:15.264: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-n4fpx": Phase="Running", Reason="", readiness=true. Elapsed: 7.508693ms + Aug 24 12:03:15.264: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-n4fpx" satisfied condition "running" + Aug 24 12:03:15.264: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-njbh5" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:15.271: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-njbh5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.624897ms + Aug 24 12:03:17.283: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-njbh5": Phase="Running", Reason="", readiness=true. Elapsed: 2.01886981s + Aug 24 12:03:17.283: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-njbh5" satisfied condition "running" + Aug 24 12:03:17.284: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-w2z9k" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:17.295: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-w2z9k": Phase="Running", Reason="", readiness=true. Elapsed: 11.081999ms + Aug 24 12:03:17.295: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-w2z9k" satisfied condition "running" + Aug 24 12:03:17.295: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-wskkg" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:17.304: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-wskkg": Phase="Running", Reason="", readiness=true. 
Elapsed: 8.656357ms + Aug 24 12:03:17.304: INFO: Pod "wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827-wskkg" satisfied condition "running" + STEP: deleting ReplicationController wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827 in namespace emptydir-wrapper-3153, will wait for the garbage collector to delete the pods 08/24/23 12:03:17.305 + Aug 24 12:03:17.388: INFO: Deleting ReplicationController wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827 took: 20.324822ms + Aug 24 12:03:17.588: INFO: Terminating ReplicationController wrapped-volume-race-e3b461cc-1f2a-43bb-a525-aa014985d827 pods took: 200.369357ms + STEP: Creating RC which spawns configmap-volume pods 08/24/23 12:03:20.301 + Aug 24 12:03:20.338: INFO: Pod name wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359: Found 0 pods out of 5 + Aug 24 12:03:25.356: INFO: Pod name wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359: Found 5 pods out of 5 + STEP: Ensuring each pod is running 08/24/23 12:03:25.356 + Aug 24 12:03:25.357: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:25.366: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 9.292609ms + Aug 24 12:03:27.387: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 2.030901421s + Aug 24 12:03:29.375: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018589702s + Aug 24 12:03:31.381: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 6.02487661s + Aug 24 12:03:33.375: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 8.017944368s + Aug 24 12:03:35.395: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Pending", Reason="", readiness=false. Elapsed: 10.038159108s + Aug 24 12:03:37.375: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks": Phase="Running", Reason="", readiness=true. Elapsed: 12.018574073s + Aug 24 12:03:37.375: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-9g7ks" satisfied condition "running" + Aug 24 12:03:37.375: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-blr4x" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:37.383: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-blr4x": Phase="Running", Reason="", readiness=true. Elapsed: 7.809901ms + Aug 24 12:03:37.383: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-blr4x" satisfied condition "running" + Aug 24 12:03:37.383: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-h7f2t" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:37.392: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-h7f2t": Phase="Running", Reason="", readiness=true. 
Elapsed: 8.678785ms + Aug 24 12:03:37.392: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-h7f2t" satisfied condition "running" + Aug 24 12:03:37.392: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-p7kwt" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:37.400: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-p7kwt": Phase="Running", Reason="", readiness=true. Elapsed: 7.719113ms + Aug 24 12:03:37.400: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-p7kwt" satisfied condition "running" + Aug 24 12:03:37.400: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-tbfm4" in namespace "emptydir-wrapper-3153" to be "running" + Aug 24 12:03:37.408: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-tbfm4": Phase="Running", Reason="", readiness=true. Elapsed: 7.546671ms + Aug 24 12:03:37.408: INFO: Pod "wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359-tbfm4" satisfied condition "running" + STEP: deleting ReplicationController wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359 in namespace emptydir-wrapper-3153, will wait for the garbage collector to delete the pods 08/24/23 12:03:37.408 + Aug 24 12:03:37.479: INFO: Deleting ReplicationController wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359 took: 12.279694ms + Aug 24 12:03:37.679: INFO: Terminating ReplicationController wrapped-volume-race-f2a480b8-649c-4693-b9b1-79ed3ea8b359 pods took: 200.810427ms + STEP: Cleaning up the configMaps 08/24/23 12:03:40.98 + [AfterEach] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/node/init/init.go:32 - Jul 29 15:56:04.353: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected configMap + Aug 24 12:03:41.541: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes tear down framework | framework.go:193 - STEP: Destroying namespace "projected-9631" for this suite. 07/29/23 15:56:04.363 + STEP: Destroying namespace "emptydir-wrapper-3153" for this suite. 
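(For illustration only: the repeated 'Waiting up to 5m0s for pod ... to be "running"' lines above follow the e2e framework's standard poll-until-phase pattern. Below is a minimal client-go sketch of an equivalent loop; the kubeconfig path, namespace, pod name, and 2s interval are placeholders, not the framework's own helper.)

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path; the suite above reads /tmp/kubeconfig-* files.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Illustrative namespace and pod name.
	ns, name := "emptydir-wrapper-3153", "example-pod"
	start := time.Now()
	// Check immediately, then every 2s, for up to 5m0s, printing the same
	// Phase/Elapsed shape as the log lines above.
	err = wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		pod, err := client.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		fmt.Printf("Pod %q: Phase=%q. Elapsed: %s\n", pod.Name, pod.Status.Phase, time.Since(start))
		return pod.Status.Phase == corev1.PodRunning, nil
	})
	if err != nil {
		panic(err)
	}
}

(PollImmediate evaluates the condition once before the first tick, which is consistent with the millisecond-scale first "Elapsed" readings above.)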
08/24/23 12:03:41.551 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:109 -[BeforeEach] [sig-storage] Projected configMap +[sig-storage] Downward API volume + should provide container's memory request [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:235 +[BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:56:04.392 -Jul 29 15:56:04.393: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 15:56:04.398 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:04.439 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:04.449 -[BeforeEach] [sig-storage] Projected configMap +STEP: Creating a kubernetes client 08/24/23 12:03:41.587 +Aug 24 12:03:41.587: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:03:41.59 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:41.623 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:41.629 +[BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:109 -STEP: Creating configMap with name projected-configmap-test-volume-map-d5ed99f9-5f9c-4c09-bd04-4c002536ad08 07/29/23 15:56:04.455 -STEP: Creating a pod to test consume configMaps 07/29/23 15:56:04.465 -Jul 29 15:56:04.483: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b" in namespace "projected-3118" to be "Succeeded or Failed" -Jul 29 15:56:04.490: INFO: Pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b": Phase="Pending", Reason="", readiness=false. Elapsed: 6.391062ms -Jul 29 15:56:06.504: INFO: Pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020593718s -Jul 29 15:56:08.498: INFO: Pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014479869s -STEP: Saw pod success 07/29/23 15:56:08.498 -Jul 29 15:56:08.499: INFO: Pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b" satisfied condition "Succeeded or Failed" -Jul 29 15:56:08.505: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b container agnhost-container: -STEP: delete the pod 07/29/23 15:56:08.521 -Jul 29 15:56:08.551: INFO: Waiting for pod pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b to disappear -Jul 29 15:56:08.557: INFO: Pod pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b no longer exists -[AfterEach] [sig-storage] Projected configMap +[BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 +[It] should provide container's memory request [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:235 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:03:41.636 +Aug 24 12:03:41.658: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673" in namespace "downward-api-7237" to be "Succeeded or Failed" +Aug 24 12:03:41.666: INFO: Pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673": Phase="Pending", Reason="", readiness=false. Elapsed: 7.223134ms +Aug 24 12:03:43.675: INFO: Pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01590422s +Aug 24 12:03:45.674: INFO: Pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01563273s +STEP: Saw pod success 08/24/23 12:03:45.676 +Aug 24 12:03:45.676: INFO: Pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673" satisfied condition "Succeeded or Failed" +Aug 24 12:03:45.680: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673 container client-container: +STEP: delete the pod 08/24/23 12:03:45.691 +Aug 24 12:03:45.712: INFO: Waiting for pod downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673 to disappear +Aug 24 12:03:45.716: INFO: Pod downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673 no longer exists +[AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 -Jul 29 15:56:08.558: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected configMap +Aug 24 12:03:45.716: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 -STEP: Destroying namespace "projected-3118" for this suite. 07/29/23 15:56:08.569 +STEP: Destroying namespace "downward-api-7237" for this suite. 
08/24/23 12:03:45.725 ------------------------------ -• [4.190 seconds] -[sig-storage] Projected configMap +• [4.149 seconds] +[sig-storage] Downward API volume test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:109 + should provide container's memory request [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:235 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected configMap + [BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:56:04.392 - Jul 29 15:56:04.393: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 15:56:04.398 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:04.439 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:04.449 - [BeforeEach] [sig-storage] Projected configMap + STEP: Creating a kubernetes client 08/24/23 12:03:41.587 + Aug 24 12:03:41.587: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 12:03:41.59 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:41.623 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:41.629 + [BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:109 - STEP: Creating configMap with name projected-configmap-test-volume-map-d5ed99f9-5f9c-4c09-bd04-4c002536ad08 07/29/23 15:56:04.455 - STEP: Creating a pod to test consume configMaps 07/29/23 15:56:04.465 - Jul 29 15:56:04.483: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b" in namespace "projected-3118" to be "Succeeded or Failed" - Jul 29 15:56:04.490: INFO: Pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b": Phase="Pending", Reason="", readiness=false. Elapsed: 6.391062ms - Jul 29 15:56:06.504: INFO: Pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020593718s - Jul 29 15:56:08.498: INFO: Pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014479869s - STEP: Saw pod success 07/29/23 15:56:08.498 - Jul 29 15:56:08.499: INFO: Pod "pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b" satisfied condition "Succeeded or Failed" - Jul 29 15:56:08.505: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b container agnhost-container: - STEP: delete the pod 07/29/23 15:56:08.521 - Jul 29 15:56:08.551: INFO: Waiting for pod pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b to disappear - Jul 29 15:56:08.557: INFO: Pod pod-projected-configmaps-24a99b92-4266-4824-9bef-a839bc8d718b no longer exists - [AfterEach] [sig-storage] Projected configMap + [BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 + [It] should provide container's memory request [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:235 + STEP: Creating a pod to test downward API volume plugin 08/24/23 12:03:41.636 + Aug 24 12:03:41.658: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673" in namespace "downward-api-7237" to be "Succeeded or Failed" + Aug 24 12:03:41.666: INFO: Pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673": Phase="Pending", Reason="", readiness=false. Elapsed: 7.223134ms + Aug 24 12:03:43.675: INFO: Pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01590422s + Aug 24 12:03:45.674: INFO: Pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01563273s + STEP: Saw pod success 08/24/23 12:03:45.676 + Aug 24 12:03:45.676: INFO: Pod "downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673" satisfied condition "Succeeded or Failed" + Aug 24 12:03:45.680: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673 container client-container: + STEP: delete the pod 08/24/23 12:03:45.691 + Aug 24 12:03:45.712: INFO: Waiting for pod downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673 to disappear + Aug 24 12:03:45.716: INFO: Pod downwardapi-volume-e90807c3-2b4f-4f8d-9145-e0d231350673 no longer exists + [AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 - Jul 29 15:56:08.558: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected configMap + Aug 24 12:03:45.716: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 - STEP: Destroying namespace "projected-3118" for this suite. 07/29/23 15:56:08.569 + STEP: Destroying namespace "downward-api-7237" for this suite. 
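(The "should provide container's memory request" test above mounts a downward API volume whose file is populated from the container's own resource request via a resourceFieldRef. A hedged sketch of the pod shape involved follows; the image tag, agnhost arguments, file path, and 64Mi request are illustrative assumptions, not the test's exact fixture.)

package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// memoryRequestPod sketches a pod whose downward API volume exposes the
// container's memory request as a file the container can read back.
func memoryRequestPod() *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "downwardapi-volume-example"},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			Containers: []corev1.Container{{
				Name:  "client-container",
				Image: "registry.k8s.io/e2e-test-images/agnhost:2.43", // placeholder tag
				// Illustrative: agnhost's mounttest subcommand prints a file back.
				Args: []string{"mounttest", "--file_content=/etc/podinfo/memory_request"},
				Resources: corev1.ResourceRequirements{
					Requests: corev1.ResourceList{
						corev1.ResourceMemory: resource.MustParse("64Mi"),
					},
				},
				VolumeMounts: []corev1.VolumeMount{{Name: "podinfo", MountPath: "/etc/podinfo"}},
			}},
			Volumes: []corev1.Volume{{
				Name: "podinfo",
				VolumeSource: corev1.VolumeSource{
					DownwardAPI: &corev1.DownwardAPIVolumeSource{
						Items: []corev1.DownwardAPIVolumeFile{{
							// The file's content is the named container's requests.memory.
							Path: "memory_request",
							ResourceFieldRef: &corev1.ResourceFieldSelector{
								ContainerName: "client-container",
								Resource:      "requests.memory",
							},
						}},
					},
				},
			}},
		},
	}
}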
08/24/23 12:03:45.725 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS +S ------------------------------ -[sig-api-machinery] Garbage collector - should orphan pods created by rc if delete options say so [Conformance] - test/e2e/apimachinery/garbage_collector.go:370 -[BeforeEach] [sig-api-machinery] Garbage collector +[sig-auth] ServiceAccounts + should mount projected service account token [Conformance] + test/e2e/auth/service_accounts.go:275 +[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:56:08.584 -Jul 29 15:56:08.584: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename gc 07/29/23 15:56:08.585 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:08.633 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:08.638 -[BeforeEach] [sig-api-machinery] Garbage collector +STEP: Creating a kubernetes client 08/24/23 12:03:45.74 +Aug 24 12:03:45.740: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename svcaccounts 08/24/23 12:03:45.744 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:45.773 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:45.778 +[BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 -[It] should orphan pods created by rc if delete options say so [Conformance] - test/e2e/apimachinery/garbage_collector.go:370 -STEP: create the rc 07/29/23 15:56:08.652 -STEP: delete the rc 07/29/23 15:56:13.782 -STEP: wait for the rc to be deleted 07/29/23 15:56:13.987 -STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods 07/29/23 15:56:19.08 -STEP: Gathering metrics 07/29/23 15:56:49.127 -Jul 29 15:56:49.184: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" -Jul 29 15:56:49.191: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.585179ms -Jul 29 15:56:49.192: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) -Jul 29 15:56:49.192: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" -Jul 29 15:56:49.345: INFO: For apiserver_request_total: -For apiserver_request_latency_seconds: -For apiserver_init_events_total: -For garbage_collector_attempt_to_delete_queue_latency: -For garbage_collector_attempt_to_delete_work_duration: -For garbage_collector_attempt_to_orphan_queue_latency: -For garbage_collector_attempt_to_orphan_work_duration: -For garbage_collector_dirty_processing_latency_microseconds: -For garbage_collector_event_processing_latency_microseconds: -For garbage_collector_graph_changes_queue_latency: -For garbage_collector_graph_changes_work_duration: -For garbage_collector_orphan_processing_latency_microseconds: -For namespace_queue_latency: -For namespace_queue_latency_sum: -For namespace_queue_latency_count: -For namespace_retries: -For namespace_work_duration: -For namespace_work_duration_sum: -For namespace_work_duration_count: -For function_duration_seconds: -For errors_total: -For evicted_pods_total: - -Jul 29 15:56:49.346: INFO: Deleting pod "simpletest.rc-27hr9" in namespace "gc-2286" -Jul 29 15:56:49.410: INFO: Deleting pod "simpletest.rc-2mwch" in namespace "gc-2286" -Jul 29 15:56:49.446: INFO: Deleting pod "simpletest.rc-2nbzk" in namespace "gc-2286" -Jul 29 15:56:49.545: INFO: Deleting pod "simpletest.rc-2nldt" in namespace "gc-2286" -Jul 29 15:56:49.616: INFO: Deleting pod "simpletest.rc-2t2gr" in namespace "gc-2286" -Jul 29 15:56:49.682: INFO: Deleting pod "simpletest.rc-2zszn" in namespace "gc-2286" -Jul 29 15:56:49.749: INFO: Deleting pod "simpletest.rc-4gfgp" in namespace "gc-2286" -Jul 29 15:56:49.810: INFO: Deleting pod "simpletest.rc-4xvpb" in namespace "gc-2286" -Jul 29 15:56:49.912: INFO: Deleting pod "simpletest.rc-596lf" in namespace "gc-2286" -Jul 29 15:56:49.980: INFO: Deleting pod "simpletest.rc-5rw8r" in namespace "gc-2286" -Jul 29 15:56:50.066: INFO: Deleting pod "simpletest.rc-6b2rw" in namespace "gc-2286" -Jul 29 15:56:50.131: INFO: Deleting pod "simpletest.rc-6b9tn" in namespace "gc-2286" -Jul 29 15:56:50.176: INFO: Deleting pod "simpletest.rc-6cwnz" in namespace "gc-2286" -Jul 29 15:56:50.234: INFO: Deleting pod "simpletest.rc-72cmd" in namespace "gc-2286" -Jul 29 15:56:50.289: INFO: Deleting pod "simpletest.rc-74n9j" in namespace "gc-2286" -Jul 29 15:56:50.357: INFO: Deleting pod "simpletest.rc-7mjpq" in namespace "gc-2286" -Jul 29 15:56:50.451: INFO: Deleting pod "simpletest.rc-7pb7c" in namespace "gc-2286" -Jul 29 15:56:50.495: INFO: Deleting pod "simpletest.rc-865gs" in namespace "gc-2286" -Jul 29 15:56:50.601: INFO: Deleting pod "simpletest.rc-8b8tn" in namespace "gc-2286" -Jul 29 15:56:50.691: INFO: Deleting pod "simpletest.rc-8hq8t" in namespace "gc-2286" -Jul 29 15:56:50.744: INFO: Deleting pod "simpletest.rc-8kqn6" in namespace "gc-2286" -Jul 29 15:56:50.781: INFO: Deleting pod "simpletest.rc-8vffn" in namespace "gc-2286" -Jul 29 15:56:50.828: INFO: Deleting pod "simpletest.rc-95wvs" in namespace "gc-2286" -Jul 29 15:56:50.931: INFO: Deleting pod "simpletest.rc-99jbv" in namespace "gc-2286" -Jul 29 15:56:50.990: INFO: Deleting pod "simpletest.rc-9bh6c" in namespace "gc-2286" -Jul 29 15:56:51.054: INFO: Deleting pod "simpletest.rc-9fpfl" in namespace "gc-2286" -Jul 29 15:56:51.119: INFO: Deleting pod "simpletest.rc-9rrdg" in namespace "gc-2286" -Jul 29 15:56:51.235: INFO: Deleting pod 
"simpletest.rc-9tn5v" in namespace "gc-2286" -Jul 29 15:56:51.328: INFO: Deleting pod "simpletest.rc-bmkhq" in namespace "gc-2286" -Jul 29 15:56:51.388: INFO: Deleting pod "simpletest.rc-bscql" in namespace "gc-2286" -Jul 29 15:56:51.497: INFO: Deleting pod "simpletest.rc-bvcc7" in namespace "gc-2286" -Jul 29 15:56:51.608: INFO: Deleting pod "simpletest.rc-bzffn" in namespace "gc-2286" -Jul 29 15:56:51.658: INFO: Deleting pod "simpletest.rc-ckfsv" in namespace "gc-2286" -Jul 29 15:56:51.936: INFO: Deleting pod "simpletest.rc-dlsqv" in namespace "gc-2286" -Jul 29 15:56:52.051: INFO: Deleting pod "simpletest.rc-dqqmr" in namespace "gc-2286" -Jul 29 15:56:52.143: INFO: Deleting pod "simpletest.rc-dvbtr" in namespace "gc-2286" -Jul 29 15:56:52.226: INFO: Deleting pod "simpletest.rc-f59s6" in namespace "gc-2286" -Jul 29 15:56:52.405: INFO: Deleting pod "simpletest.rc-f8xp5" in namespace "gc-2286" -Jul 29 15:56:52.517: INFO: Deleting pod "simpletest.rc-f9zr7" in namespace "gc-2286" -Jul 29 15:56:52.595: INFO: Deleting pod "simpletest.rc-fh72s" in namespace "gc-2286" -Jul 29 15:56:52.668: INFO: Deleting pod "simpletest.rc-fnzx7" in namespace "gc-2286" -Jul 29 15:56:52.743: INFO: Deleting pod "simpletest.rc-fzz9l" in namespace "gc-2286" -Jul 29 15:56:52.791: INFO: Deleting pod "simpletest.rc-g59sg" in namespace "gc-2286" -Jul 29 15:56:52.844: INFO: Deleting pod "simpletest.rc-gct7b" in namespace "gc-2286" -Jul 29 15:56:52.900: INFO: Deleting pod "simpletest.rc-ggh7n" in namespace "gc-2286" -Jul 29 15:56:52.945: INFO: Deleting pod "simpletest.rc-gk5ts" in namespace "gc-2286" -Jul 29 15:56:52.968: INFO: Deleting pod "simpletest.rc-hj87t" in namespace "gc-2286" -Jul 29 15:56:53.044: INFO: Deleting pod "simpletest.rc-hlk6x" in namespace "gc-2286" -Jul 29 15:56:53.097: INFO: Deleting pod "simpletest.rc-hxm26" in namespace "gc-2286" -Jul 29 15:56:53.141: INFO: Deleting pod "simpletest.rc-j29sz" in namespace "gc-2286" -Jul 29 15:56:53.197: INFO: Deleting pod "simpletest.rc-jp9vl" in namespace "gc-2286" -Jul 29 15:56:53.261: INFO: Deleting pod "simpletest.rc-k989z" in namespace "gc-2286" -Jul 29 15:56:53.329: INFO: Deleting pod "simpletest.rc-ksb6n" in namespace "gc-2286" -Jul 29 15:56:53.453: INFO: Deleting pod "simpletest.rc-ktztp" in namespace "gc-2286" -Jul 29 15:56:53.639: INFO: Deleting pod "simpletest.rc-kvkhv" in namespace "gc-2286" -Jul 29 15:56:53.709: INFO: Deleting pod "simpletest.rc-kx7xq" in namespace "gc-2286" -Jul 29 15:56:53.796: INFO: Deleting pod "simpletest.rc-lzbl7" in namespace "gc-2286" -Jul 29 15:56:53.884: INFO: Deleting pod "simpletest.rc-n5t2z" in namespace "gc-2286" -Jul 29 15:56:53.942: INFO: Deleting pod "simpletest.rc-n68lj" in namespace "gc-2286" -Jul 29 15:56:54.010: INFO: Deleting pod "simpletest.rc-nbpmx" in namespace "gc-2286" -Jul 29 15:56:54.055: INFO: Deleting pod "simpletest.rc-njdl2" in namespace "gc-2286" -Jul 29 15:56:54.151: INFO: Deleting pod "simpletest.rc-nkbsp" in namespace "gc-2286" -Jul 29 15:56:54.250: INFO: Deleting pod "simpletest.rc-pb7sr" in namespace "gc-2286" -Jul 29 15:56:54.382: INFO: Deleting pod "simpletest.rc-pdr92" in namespace "gc-2286" -Jul 29 15:56:54.510: INFO: Deleting pod "simpletest.rc-pdwkb" in namespace "gc-2286" -Jul 29 15:56:54.710: INFO: Deleting pod "simpletest.rc-pnf4h" in namespace "gc-2286" -Jul 29 15:56:54.758: INFO: Deleting pod "simpletest.rc-pp74w" in namespace "gc-2286" -Jul 29 15:56:54.802: INFO: Deleting pod "simpletest.rc-pqpsd" in namespace "gc-2286" -Jul 29 15:56:54.837: INFO: Deleting pod "simpletest.rc-q7rcx" in 
namespace "gc-2286" -Jul 29 15:56:54.898: INFO: Deleting pod "simpletest.rc-qb5jd" in namespace "gc-2286" -Jul 29 15:56:54.969: INFO: Deleting pod "simpletest.rc-qd7c4" in namespace "gc-2286" -Jul 29 15:56:55.031: INFO: Deleting pod "simpletest.rc-qdzl8" in namespace "gc-2286" -Jul 29 15:56:55.192: INFO: Deleting pod "simpletest.rc-qh5zz" in namespace "gc-2286" -Jul 29 15:56:55.254: INFO: Deleting pod "simpletest.rc-qs945" in namespace "gc-2286" -Jul 29 15:56:55.340: INFO: Deleting pod "simpletest.rc-qzspl" in namespace "gc-2286" -Jul 29 15:56:55.421: INFO: Deleting pod "simpletest.rc-rhw54" in namespace "gc-2286" -Jul 29 15:56:55.480: INFO: Deleting pod "simpletest.rc-s4dl4" in namespace "gc-2286" -Jul 29 15:56:55.599: INFO: Deleting pod "simpletest.rc-sd6mn" in namespace "gc-2286" -Jul 29 15:56:55.719: INFO: Deleting pod "simpletest.rc-smm8l" in namespace "gc-2286" -Jul 29 15:56:55.858: INFO: Deleting pod "simpletest.rc-sp4fj" in namespace "gc-2286" -Jul 29 15:56:55.968: INFO: Deleting pod "simpletest.rc-sqkqg" in namespace "gc-2286" -Jul 29 15:56:56.123: INFO: Deleting pod "simpletest.rc-t244p" in namespace "gc-2286" -Jul 29 15:56:56.197: INFO: Deleting pod "simpletest.rc-tjrjh" in namespace "gc-2286" -Jul 29 15:56:56.298: INFO: Deleting pod "simpletest.rc-vdsl7" in namespace "gc-2286" -Jul 29 15:56:56.370: INFO: Deleting pod "simpletest.rc-vdwrs" in namespace "gc-2286" -Jul 29 15:56:56.464: INFO: Deleting pod "simpletest.rc-vs7sg" in namespace "gc-2286" -Jul 29 15:56:56.560: INFO: Deleting pod "simpletest.rc-wcqwt" in namespace "gc-2286" -Jul 29 15:56:56.634: INFO: Deleting pod "simpletest.rc-wgthz" in namespace "gc-2286" -Jul 29 15:56:56.701: INFO: Deleting pod "simpletest.rc-wmd4m" in namespace "gc-2286" -Jul 29 15:56:56.786: INFO: Deleting pod "simpletest.rc-wml7m" in namespace "gc-2286" -Jul 29 15:56:56.858: INFO: Deleting pod "simpletest.rc-wqpjx" in namespace "gc-2286" -Jul 29 15:56:56.891: INFO: Deleting pod "simpletest.rc-x7qln" in namespace "gc-2286" -Jul 29 15:56:56.982: INFO: Deleting pod "simpletest.rc-xk4rg" in namespace "gc-2286" -Jul 29 15:56:57.049: INFO: Deleting pod "simpletest.rc-xqcjf" in namespace "gc-2286" -Jul 29 15:56:57.159: INFO: Deleting pod "simpletest.rc-xtz7r" in namespace "gc-2286" -Jul 29 15:56:57.223: INFO: Deleting pod "simpletest.rc-z6qct" in namespace "gc-2286" -Jul 29 15:56:57.294: INFO: Deleting pod "simpletest.rc-z7xx6" in namespace "gc-2286" -Jul 29 15:56:57.345: INFO: Deleting pod "simpletest.rc-zj4pr" in namespace "gc-2286" -Jul 29 15:56:57.480: INFO: Deleting pod "simpletest.rc-zv7j7" in namespace "gc-2286" -Jul 29 15:56:57.595: INFO: Deleting pod "simpletest.rc-zzc9m" in namespace "gc-2286" -[AfterEach] [sig-api-machinery] Garbage collector +[It] should mount projected service account token [Conformance] + test/e2e/auth/service_accounts.go:275 +STEP: Creating a pod to test service account token: 08/24/23 12:03:45.783 +Aug 24 12:03:45.799: INFO: Waiting up to 5m0s for pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c" in namespace "svcaccounts-3287" to be "Succeeded or Failed" +Aug 24 12:03:45.811: INFO: Pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c": Phase="Pending", Reason="", readiness=false. Elapsed: 11.134521ms +Aug 24 12:03:47.828: INFO: Pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c": Phase="Running", Reason="", readiness=false. Elapsed: 2.027967742s +Aug 24 12:03:49.822: INFO: Pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c": Phase="Running", Reason="", readiness=false. 
Elapsed: 4.021868426s +Aug 24 12:03:51.821: INFO: Pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.020965843s +STEP: Saw pod success 08/24/23 12:03:51.821 +Aug 24 12:03:51.822: INFO: Pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c" satisfied condition "Succeeded or Failed" +Aug 24 12:03:51.828: INFO: Trying to get logs from node pe9deep4seen-3 pod test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c container agnhost-container: +STEP: delete the pod 08/24/23 12:03:51.84 +Aug 24 12:03:51.858: INFO: Waiting for pod test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c to disappear +Aug 24 12:03:51.864: INFO: Pod test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c no longer exists +[AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 -Jul 29 15:56:57.733: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +Aug 24 12:03:51.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 -STEP: Destroying namespace "gc-2286" for this suite. 07/29/23 15:56:57.761 +STEP: Destroying namespace "svcaccounts-3287" for this suite. 08/24/23 12:03:51.873 ------------------------------ -• [SLOW TEST] [49.206 seconds] -[sig-api-machinery] Garbage collector -test/e2e/apimachinery/framework.go:23 - should orphan pods created by rc if delete options say so [Conformance] - test/e2e/apimachinery/garbage_collector.go:370 +• [SLOW TEST] [6.145 seconds] +[sig-auth] ServiceAccounts +test/e2e/auth/framework.go:23 + should mount projected service account token [Conformance] + test/e2e/auth/service_accounts.go:275 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Garbage collector + [BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:56:08.584 - Jul 29 15:56:08.584: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename gc 07/29/23 15:56:08.585 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:08.633 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:08.638 - [BeforeEach] [sig-api-machinery] Garbage collector + STEP: Creating a kubernetes client 08/24/23 12:03:45.74 + Aug 24 12:03:45.740: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename svcaccounts 08/24/23 12:03:45.744 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:45.773 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:45.778 + [BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 - [It] should orphan pods created by rc if delete options say so [Conformance] - test/e2e/apimachinery/garbage_collector.go:370 - STEP: create the rc 07/29/23 15:56:08.652 - STEP: delete the rc 07/29/23 15:56:13.782 - STEP: wait for the rc to be deleted 07/29/23 15:56:13.987 - STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods 07/29/23 15:56:19.08 - STEP: Gathering metrics 
07/29/23 15:56:49.127 - Jul 29 15:56:49.184: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" - Jul 29 15:56:49.191: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. Elapsed: 7.585179ms - Jul 29 15:56:49.192: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) - Jul 29 15:56:49.192: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" - Jul 29 15:56:49.345: INFO: For apiserver_request_total: - For apiserver_request_latency_seconds: - For apiserver_init_events_total: - For garbage_collector_attempt_to_delete_queue_latency: - For garbage_collector_attempt_to_delete_work_duration: - For garbage_collector_attempt_to_orphan_queue_latency: - For garbage_collector_attempt_to_orphan_work_duration: - For garbage_collector_dirty_processing_latency_microseconds: - For garbage_collector_event_processing_latency_microseconds: - For garbage_collector_graph_changes_queue_latency: - For garbage_collector_graph_changes_work_duration: - For garbage_collector_orphan_processing_latency_microseconds: - For namespace_queue_latency: - For namespace_queue_latency_sum: - For namespace_queue_latency_count: - For namespace_retries: - For namespace_work_duration: - For namespace_work_duration_sum: - For namespace_work_duration_count: - For function_duration_seconds: - For errors_total: - For evicted_pods_total: - - Jul 29 15:56:49.346: INFO: Deleting pod "simpletest.rc-27hr9" in namespace "gc-2286" - Jul 29 15:56:49.410: INFO: Deleting pod "simpletest.rc-2mwch" in namespace "gc-2286" - Jul 29 15:56:49.446: INFO: Deleting pod "simpletest.rc-2nbzk" in namespace "gc-2286" - Jul 29 15:56:49.545: INFO: Deleting pod "simpletest.rc-2nldt" in namespace "gc-2286" - Jul 29 15:56:49.616: INFO: Deleting pod "simpletest.rc-2t2gr" in namespace "gc-2286" - Jul 29 15:56:49.682: INFO: Deleting pod "simpletest.rc-2zszn" in namespace "gc-2286" - Jul 29 15:56:49.749: INFO: Deleting pod "simpletest.rc-4gfgp" in namespace "gc-2286" - Jul 29 15:56:49.810: INFO: Deleting pod "simpletest.rc-4xvpb" in namespace "gc-2286" - Jul 29 15:56:49.912: INFO: Deleting pod "simpletest.rc-596lf" in namespace "gc-2286" - Jul 29 15:56:49.980: INFO: Deleting pod "simpletest.rc-5rw8r" in namespace "gc-2286" - Jul 29 15:56:50.066: INFO: Deleting pod "simpletest.rc-6b2rw" in namespace "gc-2286" - Jul 29 15:56:50.131: INFO: Deleting pod "simpletest.rc-6b9tn" in namespace "gc-2286" - Jul 29 15:56:50.176: INFO: Deleting pod "simpletest.rc-6cwnz" in namespace "gc-2286" - Jul 29 15:56:50.234: INFO: Deleting pod "simpletest.rc-72cmd" in namespace "gc-2286" - Jul 29 15:56:50.289: INFO: Deleting pod "simpletest.rc-74n9j" in namespace "gc-2286" - Jul 29 15:56:50.357: INFO: Deleting pod "simpletest.rc-7mjpq" in namespace "gc-2286" - Jul 29 15:56:50.451: INFO: Deleting pod "simpletest.rc-7pb7c" in namespace "gc-2286" - Jul 29 15:56:50.495: INFO: Deleting pod "simpletest.rc-865gs" in namespace "gc-2286" - Jul 29 15:56:50.601: INFO: Deleting pod "simpletest.rc-8b8tn" in namespace "gc-2286" - Jul 29 15:56:50.691: INFO: Deleting pod "simpletest.rc-8hq8t" in namespace "gc-2286" - Jul 29 15:56:50.744: INFO: Deleting pod "simpletest.rc-8kqn6" in namespace "gc-2286" - Jul 29 15:56:50.781: INFO: Deleting pod "simpletest.rc-8vffn" in namespace "gc-2286" - Jul 29 15:56:50.828: INFO: Deleting pod "simpletest.rc-95wvs" in namespace "gc-2286" - Jul 29 15:56:50.931: INFO: Deleting pod 
"simpletest.rc-99jbv" in namespace "gc-2286" - Jul 29 15:56:50.990: INFO: Deleting pod "simpletest.rc-9bh6c" in namespace "gc-2286" - Jul 29 15:56:51.054: INFO: Deleting pod "simpletest.rc-9fpfl" in namespace "gc-2286" - Jul 29 15:56:51.119: INFO: Deleting pod "simpletest.rc-9rrdg" in namespace "gc-2286" - Jul 29 15:56:51.235: INFO: Deleting pod "simpletest.rc-9tn5v" in namespace "gc-2286" - Jul 29 15:56:51.328: INFO: Deleting pod "simpletest.rc-bmkhq" in namespace "gc-2286" - Jul 29 15:56:51.388: INFO: Deleting pod "simpletest.rc-bscql" in namespace "gc-2286" - Jul 29 15:56:51.497: INFO: Deleting pod "simpletest.rc-bvcc7" in namespace "gc-2286" - Jul 29 15:56:51.608: INFO: Deleting pod "simpletest.rc-bzffn" in namespace "gc-2286" - Jul 29 15:56:51.658: INFO: Deleting pod "simpletest.rc-ckfsv" in namespace "gc-2286" - Jul 29 15:56:51.936: INFO: Deleting pod "simpletest.rc-dlsqv" in namespace "gc-2286" - Jul 29 15:56:52.051: INFO: Deleting pod "simpletest.rc-dqqmr" in namespace "gc-2286" - Jul 29 15:56:52.143: INFO: Deleting pod "simpletest.rc-dvbtr" in namespace "gc-2286" - Jul 29 15:56:52.226: INFO: Deleting pod "simpletest.rc-f59s6" in namespace "gc-2286" - Jul 29 15:56:52.405: INFO: Deleting pod "simpletest.rc-f8xp5" in namespace "gc-2286" - Jul 29 15:56:52.517: INFO: Deleting pod "simpletest.rc-f9zr7" in namespace "gc-2286" - Jul 29 15:56:52.595: INFO: Deleting pod "simpletest.rc-fh72s" in namespace "gc-2286" - Jul 29 15:56:52.668: INFO: Deleting pod "simpletest.rc-fnzx7" in namespace "gc-2286" - Jul 29 15:56:52.743: INFO: Deleting pod "simpletest.rc-fzz9l" in namespace "gc-2286" - Jul 29 15:56:52.791: INFO: Deleting pod "simpletest.rc-g59sg" in namespace "gc-2286" - Jul 29 15:56:52.844: INFO: Deleting pod "simpletest.rc-gct7b" in namespace "gc-2286" - Jul 29 15:56:52.900: INFO: Deleting pod "simpletest.rc-ggh7n" in namespace "gc-2286" - Jul 29 15:56:52.945: INFO: Deleting pod "simpletest.rc-gk5ts" in namespace "gc-2286" - Jul 29 15:56:52.968: INFO: Deleting pod "simpletest.rc-hj87t" in namespace "gc-2286" - Jul 29 15:56:53.044: INFO: Deleting pod "simpletest.rc-hlk6x" in namespace "gc-2286" - Jul 29 15:56:53.097: INFO: Deleting pod "simpletest.rc-hxm26" in namespace "gc-2286" - Jul 29 15:56:53.141: INFO: Deleting pod "simpletest.rc-j29sz" in namespace "gc-2286" - Jul 29 15:56:53.197: INFO: Deleting pod "simpletest.rc-jp9vl" in namespace "gc-2286" - Jul 29 15:56:53.261: INFO: Deleting pod "simpletest.rc-k989z" in namespace "gc-2286" - Jul 29 15:56:53.329: INFO: Deleting pod "simpletest.rc-ksb6n" in namespace "gc-2286" - Jul 29 15:56:53.453: INFO: Deleting pod "simpletest.rc-ktztp" in namespace "gc-2286" - Jul 29 15:56:53.639: INFO: Deleting pod "simpletest.rc-kvkhv" in namespace "gc-2286" - Jul 29 15:56:53.709: INFO: Deleting pod "simpletest.rc-kx7xq" in namespace "gc-2286" - Jul 29 15:56:53.796: INFO: Deleting pod "simpletest.rc-lzbl7" in namespace "gc-2286" - Jul 29 15:56:53.884: INFO: Deleting pod "simpletest.rc-n5t2z" in namespace "gc-2286" - Jul 29 15:56:53.942: INFO: Deleting pod "simpletest.rc-n68lj" in namespace "gc-2286" - Jul 29 15:56:54.010: INFO: Deleting pod "simpletest.rc-nbpmx" in namespace "gc-2286" - Jul 29 15:56:54.055: INFO: Deleting pod "simpletest.rc-njdl2" in namespace "gc-2286" - Jul 29 15:56:54.151: INFO: Deleting pod "simpletest.rc-nkbsp" in namespace "gc-2286" - Jul 29 15:56:54.250: INFO: Deleting pod "simpletest.rc-pb7sr" in namespace "gc-2286" - Jul 29 15:56:54.382: INFO: Deleting pod "simpletest.rc-pdr92" in namespace "gc-2286" - Jul 29 15:56:54.510: INFO: 
Deleting pod "simpletest.rc-pdwkb" in namespace "gc-2286" - Jul 29 15:56:54.710: INFO: Deleting pod "simpletest.rc-pnf4h" in namespace "gc-2286" - Jul 29 15:56:54.758: INFO: Deleting pod "simpletest.rc-pp74w" in namespace "gc-2286" - Jul 29 15:56:54.802: INFO: Deleting pod "simpletest.rc-pqpsd" in namespace "gc-2286" - Jul 29 15:56:54.837: INFO: Deleting pod "simpletest.rc-q7rcx" in namespace "gc-2286" - Jul 29 15:56:54.898: INFO: Deleting pod "simpletest.rc-qb5jd" in namespace "gc-2286" - Jul 29 15:56:54.969: INFO: Deleting pod "simpletest.rc-qd7c4" in namespace "gc-2286" - Jul 29 15:56:55.031: INFO: Deleting pod "simpletest.rc-qdzl8" in namespace "gc-2286" - Jul 29 15:56:55.192: INFO: Deleting pod "simpletest.rc-qh5zz" in namespace "gc-2286" - Jul 29 15:56:55.254: INFO: Deleting pod "simpletest.rc-qs945" in namespace "gc-2286" - Jul 29 15:56:55.340: INFO: Deleting pod "simpletest.rc-qzspl" in namespace "gc-2286" - Jul 29 15:56:55.421: INFO: Deleting pod "simpletest.rc-rhw54" in namespace "gc-2286" - Jul 29 15:56:55.480: INFO: Deleting pod "simpletest.rc-s4dl4" in namespace "gc-2286" - Jul 29 15:56:55.599: INFO: Deleting pod "simpletest.rc-sd6mn" in namespace "gc-2286" - Jul 29 15:56:55.719: INFO: Deleting pod "simpletest.rc-smm8l" in namespace "gc-2286" - Jul 29 15:56:55.858: INFO: Deleting pod "simpletest.rc-sp4fj" in namespace "gc-2286" - Jul 29 15:56:55.968: INFO: Deleting pod "simpletest.rc-sqkqg" in namespace "gc-2286" - Jul 29 15:56:56.123: INFO: Deleting pod "simpletest.rc-t244p" in namespace "gc-2286" - Jul 29 15:56:56.197: INFO: Deleting pod "simpletest.rc-tjrjh" in namespace "gc-2286" - Jul 29 15:56:56.298: INFO: Deleting pod "simpletest.rc-vdsl7" in namespace "gc-2286" - Jul 29 15:56:56.370: INFO: Deleting pod "simpletest.rc-vdwrs" in namespace "gc-2286" - Jul 29 15:56:56.464: INFO: Deleting pod "simpletest.rc-vs7sg" in namespace "gc-2286" - Jul 29 15:56:56.560: INFO: Deleting pod "simpletest.rc-wcqwt" in namespace "gc-2286" - Jul 29 15:56:56.634: INFO: Deleting pod "simpletest.rc-wgthz" in namespace "gc-2286" - Jul 29 15:56:56.701: INFO: Deleting pod "simpletest.rc-wmd4m" in namespace "gc-2286" - Jul 29 15:56:56.786: INFO: Deleting pod "simpletest.rc-wml7m" in namespace "gc-2286" - Jul 29 15:56:56.858: INFO: Deleting pod "simpletest.rc-wqpjx" in namespace "gc-2286" - Jul 29 15:56:56.891: INFO: Deleting pod "simpletest.rc-x7qln" in namespace "gc-2286" - Jul 29 15:56:56.982: INFO: Deleting pod "simpletest.rc-xk4rg" in namespace "gc-2286" - Jul 29 15:56:57.049: INFO: Deleting pod "simpletest.rc-xqcjf" in namespace "gc-2286" - Jul 29 15:56:57.159: INFO: Deleting pod "simpletest.rc-xtz7r" in namespace "gc-2286" - Jul 29 15:56:57.223: INFO: Deleting pod "simpletest.rc-z6qct" in namespace "gc-2286" - Jul 29 15:56:57.294: INFO: Deleting pod "simpletest.rc-z7xx6" in namespace "gc-2286" - Jul 29 15:56:57.345: INFO: Deleting pod "simpletest.rc-zj4pr" in namespace "gc-2286" - Jul 29 15:56:57.480: INFO: Deleting pod "simpletest.rc-zv7j7" in namespace "gc-2286" - Jul 29 15:56:57.595: INFO: Deleting pod "simpletest.rc-zzc9m" in namespace "gc-2286" - [AfterEach] [sig-api-machinery] Garbage collector + [It] should mount projected service account token [Conformance] + test/e2e/auth/service_accounts.go:275 + STEP: Creating a pod to test service account token: 08/24/23 12:03:45.783 + Aug 24 12:03:45.799: INFO: Waiting up to 5m0s for pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c" in namespace "svcaccounts-3287" to be "Succeeded or Failed" + Aug 24 12:03:45.811: INFO: Pod 
"test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c": Phase="Pending", Reason="", readiness=false. Elapsed: 11.134521ms + Aug 24 12:03:47.828: INFO: Pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c": Phase="Running", Reason="", readiness=false. Elapsed: 2.027967742s + Aug 24 12:03:49.822: INFO: Pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c": Phase="Running", Reason="", readiness=false. Elapsed: 4.021868426s + Aug 24 12:03:51.821: INFO: Pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.020965843s + STEP: Saw pod success 08/24/23 12:03:51.821 + Aug 24 12:03:51.822: INFO: Pod "test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c" satisfied condition "Succeeded or Failed" + Aug 24 12:03:51.828: INFO: Trying to get logs from node pe9deep4seen-3 pod test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c container agnhost-container: + STEP: delete the pod 08/24/23 12:03:51.84 + Aug 24 12:03:51.858: INFO: Waiting for pod test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c to disappear + Aug 24 12:03:51.864: INFO: Pod test-pod-ac12e53d-62c9-4ee9-a368-d61c79e1944c no longer exists + [AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 - Jul 29 15:56:57.733: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + Aug 24 12:03:51.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 - STEP: Destroying namespace "gc-2286" for this suite. 07/29/23 15:56:57.761 + STEP: Destroying namespace "svcaccounts-3287" for this suite. 
08/24/23 12:03:51.873 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Kubelet when scheduling a busybox command in a pod - should print the output to logs [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:52 -[BeforeEach] [sig-node] Kubelet +[sig-network] Proxy version v1 + A set of valid responses are returned for both pod and service Proxy [Conformance] + test/e2e/network/proxy.go:380 +[BeforeEach] version v1 set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:56:57.809 -Jul 29 15:56:57.809: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubelet-test 07/29/23 15:56:57.819 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:57.889 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:57.895 -[BeforeEach] [sig-node] Kubelet +STEP: Creating a kubernetes client 08/24/23 12:03:51.891 +Aug 24 12:03:51.891: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename proxy 08/24/23 12:03:51.893 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:51.922 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:51.926 +[BeforeEach] version v1 test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 -[It] should print the output to logs [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:52 -Jul 29 15:56:57.915: INFO: Waiting up to 5m0s for pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a" in namespace "kubelet-test-3782" to be "running and ready" -Jul 29 15:56:57.984: INFO: Pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a": Phase="Pending", Reason="", readiness=false. Elapsed: 59.579202ms -Jul 29 15:56:57.984: INFO: The phase of Pod busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:56:59.991: INFO: Pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.066971308s -Jul 29 15:56:59.991: INFO: The phase of Pod busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a is Pending, waiting for it to be Running (with Ready = true) -Jul 29 15:57:01.995: INFO: Pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a": Phase="Running", Reason="", readiness=true. Elapsed: 4.071421386s -Jul 29 15:57:01.996: INFO: The phase of Pod busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a is Running (Ready = true) -Jul 29 15:57:01.996: INFO: Pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a" satisfied condition "running and ready" -[AfterEach] [sig-node] Kubelet +[It] A set of valid responses are returned for both pod and service Proxy [Conformance] + test/e2e/network/proxy.go:380 +Aug 24 12:03:51.930: INFO: Creating pod... +Aug 24 12:03:51.944: INFO: Waiting up to 5m0s for pod "agnhost" in namespace "proxy-6638" to be "running" +Aug 24 12:03:51.948: INFO: Pod "agnhost": Phase="Pending", Reason="", readiness=false. Elapsed: 3.747703ms +Aug 24 12:03:53.955: INFO: Pod "agnhost": Phase="Running", Reason="", readiness=true. Elapsed: 2.01166265s +Aug 24 12:03:53.956: INFO: Pod "agnhost" satisfied condition "running" +Aug 24 12:03:53.956: INFO: Creating service... 
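(The "Starting http.Client" requests that follow drive each HTTP verb through the API server's pods/.../proxy and services/.../proxy subresources; the "method=" query parameter is simply echoed back by the backend. A minimal client-go sketch of one such request, with namespace and pod name taken from the URLs below and error handling reduced to a return value.)

package sketch

import (
	"context"
	"fmt"

	"k8s.io/client-go/kubernetes"
)

// proxyVerb sends a single HTTP verb to a pod through the API server's
// proxy subresource, mirroring the request URLs in the log below.
func proxyVerb(ctx context.Context, client kubernetes.Interface, verb string) error {
	body, err := client.CoreV1().RESTClient().
		Verb(verb).
		Namespace("proxy-6638").
		Resource("pods").
		Name("agnhost").
		SubResource("proxy").
		Param("method", verb). // echoed by the test backend, as in the URLs below
		DoRaw(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("http.Client request:%s | Response:%s\n", verb, body)
	return nil
}

(Swapping Resource("pods")/Name("agnhost") for Resource("services")/Name("e2e-proxy-test-service") yields the service-proxy variant of the same request.)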
+Aug 24 12:03:53.972: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=DELETE +Aug 24 12:03:53.989: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE +Aug 24 12:03:53.990: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=OPTIONS +Aug 24 12:03:54.001: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS +Aug 24 12:03:54.001: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=PATCH +Aug 24 12:03:54.011: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH +Aug 24 12:03:54.011: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=POST +Aug 24 12:03:54.019: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST +Aug 24 12:03:54.019: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=PUT +Aug 24 12:03:54.026: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT +Aug 24 12:03:54.027: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=DELETE +Aug 24 12:03:54.038: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE +Aug 24 12:03:54.038: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=OPTIONS +Aug 24 12:03:54.052: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS +Aug 24 12:03:54.052: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=PATCH +Aug 24 12:03:54.062: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH +Aug 24 12:03:54.062: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=POST +Aug 24 12:03:54.073: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST +Aug 24 12:03:54.073: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=PUT +Aug 24 12:03:54.105: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT +Aug 24 12:03:54.105: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=GET +Aug 24 12:03:54.115: INFO: http.Client request:GET StatusCode:301 +Aug 24 12:03:54.115: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=GET +Aug 24 12:03:54.126: INFO: http.Client request:GET StatusCode:301 +Aug 24 12:03:54.127: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=HEAD +Aug 24 12:03:54.132: INFO: http.Client request:HEAD StatusCode:301 +Aug 24 12:03:54.132: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=HEAD +Aug 24 12:03:54.140: INFO: http.Client request:HEAD StatusCode:301 +[AfterEach] version v1 test/e2e/framework/node/init/init.go:32 -Jul 29 15:57:02.021: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Kubelet +Aug 24 12:03:54.140: INFO: Waiting up to 
3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] version v1 test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] version v1 dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] version v1 tear down framework | framework.go:193 -STEP: Destroying namespace "kubelet-test-3782" for this suite. 07/29/23 15:57:02.031 +STEP: Destroying namespace "proxy-6638" for this suite. 08/24/23 12:03:54.148 ------------------------------ -• [4.232 seconds] -[sig-node] Kubelet -test/e2e/common/node/framework.go:23 - when scheduling a busybox command in a pod - test/e2e/common/node/kubelet.go:44 - should print the output to logs [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:52 +• [2.272 seconds] +[sig-network] Proxy +test/e2e/network/common/framework.go:23 + version v1 + test/e2e/network/proxy.go:74 + A set of valid responses are returned for both pod and service Proxy [Conformance] + test/e2e/network/proxy.go:380 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Kubelet + [BeforeEach] version v1 set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:56:57.809 - Jul 29 15:56:57.809: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubelet-test 07/29/23 15:56:57.819 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:56:57.889 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:56:57.895 - [BeforeEach] [sig-node] Kubelet + STEP: Creating a kubernetes client 08/24/23 12:03:51.891 + Aug 24 12:03:51.891: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename proxy 08/24/23 12:03:51.893 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:51.922 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:51.926 + [BeforeEach] version v1 test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 - [It] should print the output to logs [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:52 - Jul 29 15:56:57.915: INFO: Waiting up to 5m0s for pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a" in namespace "kubelet-test-3782" to be "running and ready" - Jul 29 15:56:57.984: INFO: Pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a": Phase="Pending", Reason="", readiness=false. Elapsed: 59.579202ms - Jul 29 15:56:57.984: INFO: The phase of Pod busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:56:59.991: INFO: Pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.066971308s - Jul 29 15:56:59.991: INFO: The phase of Pod busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a is Pending, waiting for it to be Running (with Ready = true) - Jul 29 15:57:01.995: INFO: Pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.071421386s - Jul 29 15:57:01.996: INFO: The phase of Pod busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a is Running (Ready = true) - Jul 29 15:57:01.996: INFO: Pod "busybox-scheduling-73074f63-d58c-4288-8d3a-1b5b6d6b6c1a" satisfied condition "running and ready" - [AfterEach] [sig-node] Kubelet + [It] A set of valid responses are returned for both pod and service Proxy [Conformance] + test/e2e/network/proxy.go:380 + Aug 24 12:03:51.930: INFO: Creating pod... + Aug 24 12:03:51.944: INFO: Waiting up to 5m0s for pod "agnhost" in namespace "proxy-6638" to be "running" + Aug 24 12:03:51.948: INFO: Pod "agnhost": Phase="Pending", Reason="", readiness=false. Elapsed: 3.747703ms + Aug 24 12:03:53.955: INFO: Pod "agnhost": Phase="Running", Reason="", readiness=true. Elapsed: 2.01166265s + Aug 24 12:03:53.956: INFO: Pod "agnhost" satisfied condition "running" + Aug 24 12:03:53.956: INFO: Creating service... + Aug 24 12:03:53.972: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=DELETE + Aug 24 12:03:53.989: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE + Aug 24 12:03:53.990: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=OPTIONS + Aug 24 12:03:54.001: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS + Aug 24 12:03:54.001: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=PATCH + Aug 24 12:03:54.011: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH + Aug 24 12:03:54.011: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=POST + Aug 24 12:03:54.019: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST + Aug 24 12:03:54.019: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=PUT + Aug 24 12:03:54.026: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT + Aug 24 12:03:54.027: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=DELETE + Aug 24 12:03:54.038: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE + Aug 24 12:03:54.038: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=OPTIONS + Aug 24 12:03:54.052: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS + Aug 24 12:03:54.052: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=PATCH + Aug 24 12:03:54.062: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH + Aug 24 12:03:54.062: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=POST + Aug 24 12:03:54.073: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST + Aug 24 12:03:54.073: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=PUT + Aug 24 12:03:54.105: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT + Aug 24 12:03:54.105: INFO: Starting http.Client for 
https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=GET + Aug 24 12:03:54.115: INFO: http.Client request:GET StatusCode:301 + Aug 24 12:03:54.115: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=GET + Aug 24 12:03:54.126: INFO: http.Client request:GET StatusCode:301 + Aug 24 12:03:54.127: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/pods/agnhost/proxy?method=HEAD + Aug 24 12:03:54.132: INFO: http.Client request:HEAD StatusCode:301 + Aug 24 12:03:54.132: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6638/services/e2e-proxy-test-service/proxy?method=HEAD + Aug 24 12:03:54.140: INFO: http.Client request:HEAD StatusCode:301 + [AfterEach] version v1 test/e2e/framework/node/init/init.go:32 - Jul 29 15:57:02.021: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Kubelet + Aug 24 12:03:54.140: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] version v1 test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup (Each)] version v1 dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup (Each)] version v1 tear down framework | framework.go:193 - STEP: Destroying namespace "kubelet-test-3782" for this suite. 07/29/23 15:57:02.031 + STEP: Destroying namespace "proxy-6638" for this suite. 08/24/23 12:03:54.148 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] - should list, patch and delete a collection of StatefulSets [Conformance] - test/e2e/apps/statefulset.go:908 -[BeforeEach] [sig-apps] StatefulSet +[sig-network] EndpointSlice + should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] + test/e2e/network/endpointslice.go:102 +[BeforeEach] [sig-network] EndpointSlice set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:57:02.053 -Jul 29 15:57:02.053: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename statefulset 07/29/23 15:57:02.056 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:02.096 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:02.101 -[BeforeEach] [sig-apps] StatefulSet +STEP: Creating a kubernetes client 08/24/23 12:03:54.174 +Aug 24 12:03:54.174: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename endpointslice 08/24/23 12:03:54.176 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:54.226 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:54.23 +[BeforeEach] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 -[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 -STEP: Creating service test in namespace statefulset-4001 07/29/23 15:57:02.107 -[It] should list, patch and delete a collection of StatefulSets [Conformance] - test/e2e/apps/statefulset.go:908 -Jul 29 15:57:02.147: INFO: Found 0 stateful pods, waiting for 1 -Jul 29 15:57:12.159: 
INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true -STEP: patching the StatefulSet 07/29/23 15:57:12.176 -W0729 15:57:12.197190 13 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" -Jul 29 15:57:12.217: INFO: Found 1 stateful pods, waiting for 2 -Jul 29 15:57:22.229: INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 15:57:22.230: INFO: Waiting for pod test-ss-1 to enter Running - Ready=true, currently Running - Ready=true -STEP: Listing all StatefulSets 07/29/23 15:57:22.241 -STEP: Delete all of the StatefulSets 07/29/23 15:57:22.247 -STEP: Verify that StatefulSets have been deleted 07/29/23 15:57:22.264 -[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 -Jul 29 15:57:22.273: INFO: Deleting all statefulset in ns statefulset-4001 -[AfterEach] [sig-apps] StatefulSet +[BeforeEach] [sig-network] EndpointSlice + test/e2e/network/endpointslice.go:52 +[It] should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] + test/e2e/network/endpointslice.go:102 +[AfterEach] [sig-network] EndpointSlice test/e2e/framework/node/init/init.go:32 -Jul 29 15:57:22.298: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] StatefulSet +Aug 24 12:03:56.325: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-network] EndpointSlice dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-network] EndpointSlice tear down framework | framework.go:193 -STEP: Destroying namespace "statefulset-4001" for this suite. 07/29/23 15:57:22.317 +STEP: Destroying namespace "endpointslice-9907" for this suite. 
08/24/23 12:03:56.332 ------------------------------ -• [SLOW TEST] [20.362 seconds] -[sig-apps] StatefulSet -test/e2e/apps/framework.go:23 - Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:103 - should list, patch and delete a collection of StatefulSets [Conformance] - test/e2e/apps/statefulset.go:908 +• [2.169 seconds] +[sig-network] EndpointSlice +test/e2e/network/common/framework.go:23 + should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] + test/e2e/network/endpointslice.go:102 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] StatefulSet + [BeforeEach] [sig-network] EndpointSlice set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:57:02.053 - Jul 29 15:57:02.053: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename statefulset 07/29/23 15:57:02.056 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:02.096 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:02.101 - [BeforeEach] [sig-apps] StatefulSet + STEP: Creating a kubernetes client 08/24/23 12:03:54.174 + Aug 24 12:03:54.174: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename endpointslice 08/24/23 12:03:54.176 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:54.226 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:54.23 + [BeforeEach] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 - [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 - STEP: Creating service test in namespace statefulset-4001 07/29/23 15:57:02.107 - [It] should list, patch and delete a collection of StatefulSets [Conformance] - test/e2e/apps/statefulset.go:908 - Jul 29 15:57:02.147: INFO: Found 0 stateful pods, waiting for 1 - Jul 29 15:57:12.159: INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true - STEP: patching the StatefulSet 07/29/23 15:57:12.176 - W0729 15:57:12.197190 13 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" - Jul 29 15:57:12.217: INFO: Found 1 stateful pods, waiting for 2 - Jul 29 15:57:22.229: INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 15:57:22.230: INFO: Waiting for pod test-ss-1 to enter Running - Ready=true, currently Running - Ready=true - STEP: Listing all StatefulSets 07/29/23 15:57:22.241 - STEP: Delete all of the StatefulSets 07/29/23 15:57:22.247 - STEP: Verify that StatefulSets have been deleted 07/29/23 15:57:22.264 - [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 - Jul 29 15:57:22.273: INFO: Deleting all statefulset in ns statefulset-4001 - [AfterEach] [sig-apps] StatefulSet + [BeforeEach] [sig-network] EndpointSlice + test/e2e/network/endpointslice.go:52 + [It] should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] + test/e2e/network/endpointslice.go:102 + [AfterEach] [sig-network] EndpointSlice test/e2e/framework/node/init/init.go:32 - Jul 29 15:57:22.298: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] StatefulSet + Aug 24 12:03:56.325: 
INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-network] EndpointSlice dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-network] EndpointSlice tear down framework | framework.go:193 - STEP: Destroying namespace "statefulset-4001" for this suite. 07/29/23 15:57:22.317 + STEP: Destroying namespace "endpointslice-9907" for this suite. 08/24/23 12:03:56.332 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] Job - should apply changes to a job status [Conformance] - test/e2e/apps/job.go:636 -[BeforeEach] [sig-apps] Job +[sig-api-machinery] Namespaces [Serial] + should apply an update to a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:366 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:57:22.42 -Jul 29 15:57:22.420: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename job 07/29/23 15:57:22.433 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:22.474 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:22.48 -[BeforeEach] [sig-apps] Job +STEP: Creating a kubernetes client 08/24/23 12:03:56.346 +Aug 24 12:03:56.346: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename namespaces 08/24/23 12:03:56.348 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:56.377 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:56.383 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should apply changes to a job status [Conformance] - test/e2e/apps/job.go:636 -STEP: Creating a job 07/29/23 15:57:22.489 -STEP: Ensure pods equal to parallelism count is attached to the job 07/29/23 15:57:22.51 -STEP: patching /status 07/29/23 15:57:24.521 -STEP: updating /status 07/29/23 15:57:24.539 -STEP: get /status 07/29/23 15:57:24.599 -[AfterEach] [sig-apps] Job +[It] should apply an update to a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:366 +STEP: Updating Namespace "namespaces-5952" 08/24/23 12:03:56.388 +Aug 24 12:03:56.403: INFO: Namespace "namespaces-5952" now has labels, map[string]string{"e2e-framework":"namespaces", "e2e-run":"e37f2036-3a54-4653-ada1-c01489d8d1f1", "kubernetes.io/metadata.name":"namespaces-5952", "namespaces-5952":"updated", "pod-security.kubernetes.io/enforce":"baseline"} +[AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 15:57:24.608: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Job +Aug 24 12:03:56.403: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Job +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Job +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "job-5257" for this 
suite. 07/29/23 15:57:24.62 +STEP: Destroying namespace "namespaces-5952" for this suite. 08/24/23 12:03:56.412 ------------------------------ -• [2.221 seconds] -[sig-apps] Job -test/e2e/apps/framework.go:23 - should apply changes to a job status [Conformance] - test/e2e/apps/job.go:636 +• [0.077 seconds] +[sig-api-machinery] Namespaces [Serial] +test/e2e/apimachinery/framework.go:23 + should apply an update to a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:366 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Job + [BeforeEach] [sig-api-machinery] Namespaces [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:57:22.42 - Jul 29 15:57:22.420: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename job 07/29/23 15:57:22.433 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:22.474 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:22.48 - [BeforeEach] [sig-apps] Job + STEP: Creating a kubernetes client 08/24/23 12:03:56.346 + Aug 24 12:03:56.346: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename namespaces 08/24/23 12:03:56.348 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:56.377 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:56.383 + [BeforeEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] should apply changes to a job status [Conformance] - test/e2e/apps/job.go:636 - STEP: Creating a job 07/29/23 15:57:22.489 - STEP: Ensure pods equal to parallelism count is attached to the job 07/29/23 15:57:22.51 - STEP: patching /status 07/29/23 15:57:24.521 - STEP: updating /status 07/29/23 15:57:24.539 - STEP: get /status 07/29/23 15:57:24.599 - [AfterEach] [sig-apps] Job + [It] should apply an update to a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:366 + STEP: Updating Namespace "namespaces-5952" 08/24/23 12:03:56.388 + Aug 24 12:03:56.403: INFO: Namespace "namespaces-5952" now has labels, map[string]string{"e2e-framework":"namespaces", "e2e-run":"e37f2036-3a54-4653-ada1-c01489d8d1f1", "kubernetes.io/metadata.name":"namespaces-5952", "namespaces-5952":"updated", "pod-security.kubernetes.io/enforce":"baseline"} + [AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 15:57:24.608: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Job + Aug 24 12:03:56.403: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Job + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Job + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "job-5257" for this suite. 07/29/23 15:57:24.62 + STEP: Destroying namespace "namespaces-5952" for this suite. 
08/24/23 12:03:56.412 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSS ------------------------------ -[sig-apps] Deployment - should run the lifecycle of a Deployment [Conformance] - test/e2e/apps/deployment.go:185 -[BeforeEach] [sig-apps] Deployment +[sig-node] Security Context When creating a pod with privileged + should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:528 +[BeforeEach] [sig-node] Security Context set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:57:24.638 -Jul 29 15:57:24.638: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename deployment 07/29/23 15:57:24.642 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:24.678 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:24.684 -[BeforeEach] [sig-apps] Deployment +STEP: Creating a kubernetes client 08/24/23 12:03:56.428 +Aug 24 12:03:56.429: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename security-context-test 08/24/23 12:03:56.43 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:56.46 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:56.466 +[BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 -[It] should run the lifecycle of a Deployment [Conformance] - test/e2e/apps/deployment.go:185 -STEP: creating a Deployment 07/29/23 15:57:24.698 -STEP: waiting for Deployment to be created 07/29/23 15:57:24.71 -STEP: waiting for all Replicas to be Ready 07/29/23 15:57:24.713 -Jul 29 15:57:24.716: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Jul 29 15:57:24.716: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Jul 29 15:57:24.738: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Jul 29 15:57:24.738: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Jul 29 15:57:24.802: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Jul 29 15:57:24.802: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Jul 29 15:57:24.864: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Jul 29 15:57:24.865: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] -Jul 29 15:57:26.374: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment-static:true] -Jul 29 15:57:26.374: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment-static:true] -Jul 29 15:57:26.498: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 and labels 
map[test-deployment-static:true] -STEP: patching the Deployment 07/29/23 15:57:26.498 -W0729 15:57:26.519050 13 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" -Jul 29 15:57:26.522: INFO: observed event type ADDED -STEP: waiting for Replicas to scale 07/29/23 15:57:26.522 -Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 -Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 -Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 -Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 -Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 -Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 -Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 -Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 -Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:26.545: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:26.545: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:26.584: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:26.584: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:26.613: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:26.614: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:26.666: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:26.666: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:28.605: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:28.605: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:28.640: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -STEP: listing Deployments 07/29/23 15:57:28.64 -Jul 29 15:57:28.648: INFO: Found test-deployment with labels: map[test-deployment:patched test-deployment-static:true] -STEP: updating the Deployment 07/29/23 15:57:28.648 -Jul 29 15:57:28.673: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -STEP: fetching the DeploymentStatus 07/29/23 15:57:28.673 -Jul 
29 15:57:28.698: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -Jul 29 15:57:28.722: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -Jul 29 15:57:28.812: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -Jul 29 15:57:28.865: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -Jul 29 15:57:28.888: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] -Jul 29 15:57:30.269: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] -Jul 29 15:57:30.553: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 3 and labels map[test-deployment:updated test-deployment-static:true] -Jul 29 15:57:30.661: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] -Jul 29 15:57:30.692: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] -Jul 29 15:57:32.434: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 3 and labels map[test-deployment:updated test-deployment-static:true] -STEP: patching the DeploymentStatus 07/29/23 15:57:32.528 -STEP: fetching the DeploymentStatus 07/29/23 15:57:32.545 -Jul 29 15:57:32.557: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:32.558: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:32.559: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:32.559: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:32.559: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 -Jul 29 15:57:32.559: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:32.560: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 3 -Jul 29 15:57:32.560: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:32.560: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 -Jul 29 15:57:32.560: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 3 -STEP: deleting the Deployment 07/29/23 15:57:32.561 -Jul 29 15:57:32.582: INFO: observed event type MODIFIED -Jul 29 15:57:32.583: INFO: observed event type MODIFIED -Jul 29 15:57:32.583: INFO: observed event type MODIFIED -Jul 29 15:57:32.583: INFO: observed event type MODIFIED -Jul 29 15:57:32.583: INFO: observed event type MODIFIED -Jul 29 15:57:32.584: INFO: observed event type MODIFIED -Jul 29 15:57:32.585: INFO: observed event type MODIFIED -Jul 29 15:57:32.585: INFO: 
observed event type MODIFIED -Jul 29 15:57:32.586: INFO: observed event type MODIFIED -Jul 29 15:57:32.586: INFO: observed event type MODIFIED -Jul 29 15:57:32.587: INFO: observed event type MODIFIED -[AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 -Jul 29 15:57:32.594: INFO: Log out all the ReplicaSets if there is no deployment created -Jul 29 15:57:32.608: INFO: ReplicaSet "test-deployment-7b7876f9d6": -&ReplicaSet{ObjectMeta:{test-deployment-7b7876f9d6 deployment-3086 b93ebabf-936a-49ad-a622-d103701a509e 12438 2 2023-07-29 15:57:28 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:3] [{apps/v1 Deployment test-deployment c355c0ae-3051-4b1e-bff0-97b3e8757f5c 0xc00518c2f7 0xc00518c2f8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 15:57:30 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c355c0ae-3051-4b1e-bff0-97b3e8757f5c\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 15:57:32 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 7b7876f9d6,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00518c380 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:2,FullyLabeledReplicas:2,ObservedGeneration:2,ReadyReplicas:2,AvailableReplicas:2,Conditions:[]ReplicaSetCondition{},},} - -Jul 29 15:57:32.615: INFO: pod: "test-deployment-7b7876f9d6-k8b4f": -&Pod{ObjectMeta:{test-deployment-7b7876f9d6-k8b4f test-deployment-7b7876f9d6- deployment-3086 cc130eff-4a6f-4370-a14b-e63c7d94799f 12437 0 2023-07-29 15:57:30 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [{apps/v1 ReplicaSet 
test-deployment-7b7876f9d6 b93ebabf-936a-49ad-a622-d103701a509e 0xc001f122c7 0xc001f122c8}] [] [{kube-controller-manager Update v1 2023-07-29 15:57:30 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b93ebabf-936a-49ad-a622-d103701a509e\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 15:57:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.131\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-pgl4s,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pgl4s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:ni
l,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:10.233.65.131,StartTime:2023-07-29 15:57:30 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 15:57:31 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://1960a4b999668b683688a39ce30766738ca2a8e7f79067c05e23817d15d5ad61,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.131,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - -Jul 29 15:57:32.615: INFO: pod: "test-deployment-7b7876f9d6-nd5bj": -&Pod{ObjectMeta:{test-deployment-7b7876f9d6-nd5bj test-deployment-7b7876f9d6- deployment-3086 26173351-4f51-4984-adcc-673d4d444ac2 12404 0 2023-07-29 15:57:28 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7b7876f9d6 b93ebabf-936a-49ad-a622-d103701a509e 0xc001f124b7 0xc001f124b8}] [] [{kube-controller-manager Update v1 2023-07-29 15:57:28 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b93ebabf-936a-49ad-a622-d103701a509e\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 15:57:30 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.20\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-52rcn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-52rcn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,De
precatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.20,StartTime:2023-07-29 15:57:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 15:57:29 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://5869d550081cb941256a2b90ab3d750c84765b19d32a2cdbf35d8e92d188fa1c,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.20,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - -Jul 29 15:57:32.615: INFO: ReplicaSet "test-deployment-7df74c55ff": -&ReplicaSet{ObjectMeta:{test-deployment-7df74c55ff deployment-3086 ee7489fa-4e28-4d6a-9d05-3abc9a5b2f29 12446 4 2023-07-29 15:57:26 +0000 UTC map[pod-template-hash:7df74c55ff test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-deployment c355c0ae-3051-4b1e-bff0-97b3e8757f5c 0xc00518c3e7 0xc00518c3e8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 15:57:32 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c355c0ae-3051-4b1e-bff0-97b3e8757f5c\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 15:57:32 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 7df74c55ff,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/pause:3.9 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00518c480 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:4,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - -Jul 29 15:57:32.623: INFO: pod: "test-deployment-7df74c55ff-chp4x": -&Pod{ObjectMeta:{test-deployment-7df74c55ff-chp4x test-deployment-7df74c55ff- deployment-3086 acd180c6-486f-42e1-a130-46b1893ea7c3 12442 0 2023-07-29 15:57:28 +0000 UTC 2023-07-29 15:57:33 +0000 UTC 0xc001f13ae8 map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7df74c55ff ee7489fa-4e28-4d6a-9d05-3abc9a5b2f29 0xc001f13b17 0xc001f13b18}] [] [{kube-controller-manager Update v1 2023-07-29 15:57:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ee7489fa-4e28-4d6a-9d05-3abc9a5b2f29\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 15:57:30 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.183\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-blgtz,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/pause:3.9,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-blgtz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kub
ernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:10.233.64.183,StartTime:2023-07-29 15:57:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 15:57:29 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/pause:3.9,ImageID:registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097,ContainerID:cri-o://7f075f9f48b7117ccb548a9beeabfa58ecff5b62e9b1f5e53b7aea4697d615fd,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.183,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - -Jul 29 15:57:32.624: INFO: ReplicaSet "test-deployment-f4dbc4647": -&ReplicaSet{ObjectMeta:{test-deployment-f4dbc4647 deployment-3086 8e289fdd-4747-4370-a0a3-4b434f70389a 12335 3 2023-07-29 15:57:24 +0000 UTC map[pod-template-hash:f4dbc4647 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment c355c0ae-3051-4b1e-bff0-97b3e8757f5c 0xc00518c4e7 0xc00518c4e8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 15:57:28 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c355c0ae-3051-4b1e-bff0-97b3e8757f5c\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 15:57:28 +0000 UTC FieldsV1 
{"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: f4dbc4647,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:f4dbc4647 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00518c580 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:3,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - -[AfterEach] [sig-apps] Deployment +[BeforeEach] [sig-node] Security Context + test/e2e/common/node/security_context.go:50 +[It] should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:528 +Aug 24 12:03:56.487: INFO: Waiting up to 5m0s for pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112" in namespace "security-context-test-849" to be "Succeeded or Failed" +Aug 24 12:03:56.493: INFO: Pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112": Phase="Pending", Reason="", readiness=false. Elapsed: 5.890233ms +Aug 24 12:03:58.500: INFO: Pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012621821s +Aug 24 12:04:00.501: INFO: Pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013446268s +Aug 24 12:04:00.501: INFO: Pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112" satisfied condition "Succeeded or Failed" +Aug 24 12:04:00.512: INFO: Got logs for pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112": "ip: RTNETLINK answers: Operation not permitted\n" +[AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 -Jul 29 15:57:32.638: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Deployment +Aug 24 12:04:00.513: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 -STEP: Destroying namespace "deployment-3086" for this suite. 07/29/23 15:57:32.658 +STEP: Destroying namespace "security-context-test-849" for this suite. 
08/24/23 12:04:00.522 ------------------------------ -• [SLOW TEST] [8.038 seconds] -[sig-apps] Deployment -test/e2e/apps/framework.go:23 - should run the lifecycle of a Deployment [Conformance] - test/e2e/apps/deployment.go:185 +• [4.104 seconds] +[sig-node] Security Context +test/e2e/common/node/framework.go:23 + When creating a pod with privileged + test/e2e/common/node/security_context.go:491 + should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:528 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Deployment + [BeforeEach] [sig-node] Security Context set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:57:24.638 - Jul 29 15:57:24.638: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename deployment 07/29/23 15:57:24.642 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:24.678 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:24.684 - [BeforeEach] [sig-apps] Deployment + STEP: Creating a kubernetes client 08/24/23 12:03:56.428 + Aug 24 12:03:56.429: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename security-context-test 08/24/23 12:03:56.43 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:03:56.46 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:03:56.466 + [BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 - [It] should run the lifecycle of a Deployment [Conformance] - test/e2e/apps/deployment.go:185 - STEP: creating a Deployment 07/29/23 15:57:24.698 - STEP: waiting for Deployment to be created 07/29/23 15:57:24.71 - STEP: waiting for all Replicas to be Ready 07/29/23 15:57:24.713 - Jul 29 15:57:24.716: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] - Jul 29 15:57:24.716: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] - Jul 29 15:57:24.738: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] - Jul 29 15:57:24.738: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] - Jul 29 15:57:24.802: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] - Jul 29 15:57:24.802: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] - Jul 29 15:57:24.864: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] - Jul 29 15:57:24.865: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 and labels map[test-deployment-static:true] - Jul 29 15:57:26.374: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment-static:true] - Jul 29 15:57:26.374: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels 
map[test-deployment-static:true] - Jul 29 15:57:26.498: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 and labels map[test-deployment-static:true] - STEP: patching the Deployment 07/29/23 15:57:26.498 - W0729 15:57:26.519050 13 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" - Jul 29 15:57:26.522: INFO: observed event type ADDED - STEP: waiting for Replicas to scale 07/29/23 15:57:26.522 - Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 - Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 - Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 - Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 - Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 - Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 - Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 - Jul 29 15:57:26.528: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 0 - Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:26.529: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:26.545: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:26.545: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:26.584: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:26.584: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:26.613: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:26.614: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:26.666: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:26.666: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:28.605: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:28.605: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:28.640: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - STEP: listing Deployments 07/29/23 15:57:28.64 - Jul 29 15:57:28.648: INFO: Found test-deployment with labels: map[test-deployment:patched test-deployment-static:true] - STEP: updating the Deployment 07/29/23 
15:57:28.648 - Jul 29 15:57:28.673: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - STEP: fetching the DeploymentStatus 07/29/23 15:57:28.673 - Jul 29 15:57:28.698: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] - Jul 29 15:57:28.722: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] - Jul 29 15:57:28.812: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] - Jul 29 15:57:28.865: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] - Jul 29 15:57:28.888: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] - Jul 29 15:57:30.269: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] - Jul 29 15:57:30.553: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 3 and labels map[test-deployment:updated test-deployment-static:true] - Jul 29 15:57:30.661: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] - Jul 29 15:57:30.692: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] - Jul 29 15:57:32.434: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 3 and labels map[test-deployment:updated test-deployment-static:true] - STEP: patching the DeploymentStatus 07/29/23 15:57:32.528 - STEP: fetching the DeploymentStatus 07/29/23 15:57:32.545 - Jul 29 15:57:32.557: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:32.558: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:32.559: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:32.559: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:32.559: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 1 - Jul 29 15:57:32.559: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:32.560: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 3 - Jul 29 15:57:32.560: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:32.560: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 2 - Jul 29 15:57:32.560: INFO: observed Deployment test-deployment in namespace deployment-3086 with ReadyReplicas 3 - STEP: deleting the Deployment 07/29/23 15:57:32.561 - Jul 29 15:57:32.582: INFO: observed event type MODIFIED - Jul 29 15:57:32.583: INFO: observed event type MODIFIED - Jul 29 15:57:32.583: INFO: observed event type MODIFIED - Jul 29 15:57:32.583: INFO: observed 
event type MODIFIED - Jul 29 15:57:32.583: INFO: observed event type MODIFIED - Jul 29 15:57:32.584: INFO: observed event type MODIFIED - Jul 29 15:57:32.585: INFO: observed event type MODIFIED - Jul 29 15:57:32.585: INFO: observed event type MODIFIED - Jul 29 15:57:32.586: INFO: observed event type MODIFIED - Jul 29 15:57:32.586: INFO: observed event type MODIFIED - Jul 29 15:57:32.587: INFO: observed event type MODIFIED - [AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 - Jul 29 15:57:32.594: INFO: Log out all the ReplicaSets if there is no deployment created - Jul 29 15:57:32.608: INFO: ReplicaSet "test-deployment-7b7876f9d6": - &ReplicaSet{ObjectMeta:{test-deployment-7b7876f9d6 deployment-3086 b93ebabf-936a-49ad-a622-d103701a509e 12438 2 2023-07-29 15:57:28 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:3] [{apps/v1 Deployment test-deployment c355c0ae-3051-4b1e-bff0-97b3e8757f5c 0xc00518c2f7 0xc00518c2f8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 15:57:30 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c355c0ae-3051-4b1e-bff0-97b3e8757f5c\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 15:57:32 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 7b7876f9d6,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00518c380 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:2,FullyLabeledReplicas:2,ObservedGeneration:2,ReadyReplicas:2,AvailableReplicas:2,Conditions:[]ReplicaSetCondition{},},} - - Jul 29 15:57:32.615: INFO: pod: "test-deployment-7b7876f9d6-k8b4f": - 
&Pod{ObjectMeta:{test-deployment-7b7876f9d6-k8b4f test-deployment-7b7876f9d6- deployment-3086 cc130eff-4a6f-4370-a14b-e63c7d94799f 12437 0 2023-07-29 15:57:30 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7b7876f9d6 b93ebabf-936a-49ad-a622-d103701a509e 0xc001f122c7 0xc001f122c8}] [] [{kube-controller-manager Update v1 2023-07-29 15:57:30 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b93ebabf-936a-49ad-a622-d103701a509e\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 15:57:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.131\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-pgl4s,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pgl4s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnl
yRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:10.233.65.131,StartTime:2023-07-29 15:57:30 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 15:57:31 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://1960a4b999668b683688a39ce30766738ca2a8e7f79067c05e23817d15d5ad61,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.131,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - - Jul 29 15:57:32.615: INFO: pod: "test-deployment-7b7876f9d6-nd5bj": - &Pod{ObjectMeta:{test-deployment-7b7876f9d6-nd5bj test-deployment-7b7876f9d6- deployment-3086 26173351-4f51-4984-adcc-673d4d444ac2 12404 0 2023-07-29 15:57:28 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7b7876f9d6 b93ebabf-936a-49ad-a622-d103701a509e 0xc001f124b7 
0xc001f124b8}] [] [{kube-controller-manager Update v1 2023-07-29 15:57:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b93ebabf-936a-49ad-a622-d103701a509e\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 15:57:30 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.20\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-52rcn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-52rcn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeco
nds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.20,StartTime:2023-07-29 15:57:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 15:57:29 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://5869d550081cb941256a2b90ab3d750c84765b19d32a2cdbf35d8e92d188fa1c,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.20,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - - Jul 29 15:57:32.615: INFO: ReplicaSet "test-deployment-7df74c55ff": - &ReplicaSet{ObjectMeta:{test-deployment-7df74c55ff deployment-3086 ee7489fa-4e28-4d6a-9d05-3abc9a5b2f29 12446 4 2023-07-29 15:57:26 +0000 UTC map[pod-template-hash:7df74c55ff test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-deployment c355c0ae-3051-4b1e-bff0-97b3e8757f5c 0xc00518c3e7 0xc00518c3e8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 15:57:32 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c355c0ae-3051-4b1e-bff0-97b3e8757f5c\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 15:57:32 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 7df74c55ff,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/pause:3.9 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00518c480 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:4,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - - Jul 29 15:57:32.623: INFO: pod: "test-deployment-7df74c55ff-chp4x": - &Pod{ObjectMeta:{test-deployment-7df74c55ff-chp4x test-deployment-7df74c55ff- deployment-3086 acd180c6-486f-42e1-a130-46b1893ea7c3 12442 0 2023-07-29 15:57:28 +0000 UTC 2023-07-29 15:57:33 +0000 UTC 0xc001f13ae8 map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7df74c55ff ee7489fa-4e28-4d6a-9d05-3abc9a5b2f29 0xc001f13b17 0xc001f13b18}] [] [{kube-controller-manager Update v1 2023-07-29 15:57:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ee7489fa-4e28-4d6a-9d05-3abc9a5b2f29\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 15:57:30 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.183\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-blgtz,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/pause:3.9,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-blgtz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kub
ernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 15:57:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:10.233.64.183,StartTime:2023-07-29 15:57:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 15:57:29 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/pause:3.9,ImageID:registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097,ContainerID:cri-o://7f075f9f48b7117ccb548a9beeabfa58ecff5b62e9b1f5e53b7aea4697d615fd,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.183,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - - Jul 29 15:57:32.624: INFO: ReplicaSet "test-deployment-f4dbc4647": - &ReplicaSet{ObjectMeta:{test-deployment-f4dbc4647 deployment-3086 8e289fdd-4747-4370-a0a3-4b434f70389a 12335 3 2023-07-29 15:57:24 +0000 UTC map[pod-template-hash:f4dbc4647 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment c355c0ae-3051-4b1e-bff0-97b3e8757f5c 0xc00518c4e7 0xc00518c4e8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 15:57:28 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c355c0ae-3051-4b1e-bff0-97b3e8757f5c\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 15:57:28 +0000 UTC FieldsV1 
{"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: f4dbc4647,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:f4dbc4647 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00518c580 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:3,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - - [AfterEach] [sig-apps] Deployment + [BeforeEach] [sig-node] Security Context + test/e2e/common/node/security_context.go:50 + [It] should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:528 + Aug 24 12:03:56.487: INFO: Waiting up to 5m0s for pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112" in namespace "security-context-test-849" to be "Succeeded or Failed" + Aug 24 12:03:56.493: INFO: Pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112": Phase="Pending", Reason="", readiness=false. Elapsed: 5.890233ms + Aug 24 12:03:58.500: INFO: Pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012621821s + Aug 24 12:04:00.501: INFO: Pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013446268s + Aug 24 12:04:00.501: INFO: Pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112" satisfied condition "Succeeded or Failed" + Aug 24 12:04:00.512: INFO: Got logs for pod "busybox-privileged-false-7b10d0cf-7d8c-4fd9-9390-a19de1db3112": "ip: RTNETLINK answers: Operation not permitted\n" + [AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 - Jul 29 15:57:32.638: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Deployment + Aug 24 12:04:00.513: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 - STEP: Destroying namespace "deployment-3086" for this suite. 07/29/23 15:57:32.658 + STEP: Destroying namespace "security-context-test-849" for this suite. 
08/24/23 12:04:00.522 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSS +SSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:207 -[BeforeEach] [sig-storage] EmptyDir volumes +[sig-apps] Daemon set [Serial] + should run and stop complex daemon [Conformance] + test/e2e/apps/daemon_set.go:205 +[BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:57:32.682 -Jul 29 15:57:32.682: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 15:57:32.684 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:32.72 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:32.725 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 12:04:00.536 +Aug 24 12:04:00.536: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename daemonsets 08/24/23 12:04:00.538 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:00.568 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:00.574 +[BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:207 -STEP: Creating a pod to test emptydir 0666 on node default medium 07/29/23 15:57:32.733 -Jul 29 15:57:32.748: INFO: Waiting up to 5m0s for pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd" in namespace "emptydir-7280" to be "Succeeded or Failed" -Jul 29 15:57:32.765: INFO: Pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd": Phase="Pending", Reason="", readiness=false. Elapsed: 16.558393ms -Jul 29 15:57:34.773: INFO: Pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024955935s -Jul 29 15:57:36.776: INFO: Pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02798292s -STEP: Saw pod success 07/29/23 15:57:36.777 -Jul 29 15:57:36.777: INFO: Pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd" satisfied condition "Succeeded or Failed" -Jul 29 15:57:36.785: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd container test-container: -STEP: delete the pod 07/29/23 15:57:36.809 -Jul 29 15:57:36.840: INFO: Waiting for pod pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd to disappear -Jul 29 15:57:36.846: INFO: Pod pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 +[It] should run and stop complex daemon [Conformance] + test/e2e/apps/daemon_set.go:205 +Aug 24 12:04:00.625: INFO: Creating daemon "daemon-set" with a node selector +STEP: Initially, daemon pods should not be running on any nodes. 08/24/23 12:04:00.634 +Aug 24 12:04:00.640: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:00.640: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set +STEP: Change node label to blue, check that daemon pod is launched. 
08/24/23 12:04:00.64 +Aug 24 12:04:00.681: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:00.681: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:04:01.690: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:01.690: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:04:02.692: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:02.692: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:04:03.690: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 +Aug 24 12:04:03.690: INFO: Number of running nodes: 1, number of available pods: 1 in daemonset daemon-set +STEP: Update the node label to green, and wait for daemons to be unscheduled 08/24/23 12:04:03.697 +Aug 24 12:04:03.731: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 +Aug 24 12:04:03.731: INFO: Number of running nodes: 0, number of available pods: 1 in daemonset daemon-set +Aug 24 12:04:04.740: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:04.740: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set +STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate 08/24/23 12:04:04.74 +Aug 24 12:04:04.771: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:04.771: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:04:05.779: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:05.779: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:04:06.783: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:06.783: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:04:07.780: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:07.780: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:04:08.780: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 +Aug 24 12:04:08.780: INFO: Number of running nodes: 1, number of available pods: 1 in daemonset daemon-set +[AfterEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:122 +STEP: Deleting DaemonSet "daemon-set" 08/24/23 12:04:08.795 +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-2169, will wait for the garbage collector to delete the pods 08/24/23 12:04:08.796 +Aug 24 12:04:08.866: INFO: Deleting DaemonSet.extensions daemon-set took: 12.602697ms +Aug 24 12:04:08.967: INFO: Terminating DaemonSet.extensions daemon-set pods took: 101.099951ms +Aug 24 12:04:11.481: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:04:11.481: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set +Aug 24 12:04:11.488: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"10987"},"items":null} + +Aug 24 12:04:11.494: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"10987"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 15:57:36.846: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes 
+Aug 24 12:04:11.572: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-7280" for this suite. 07/29/23 15:57:36.86 +STEP: Destroying namespace "daemonsets-2169" for this suite. 08/24/23 12:04:11.581 ------------------------------ -• [4.192 seconds] -[sig-storage] EmptyDir volumes -test/e2e/common/storage/framework.go:23 - should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:207 +• [SLOW TEST] [11.055 seconds] +[sig-apps] Daemon set [Serial] +test/e2e/apps/framework.go:23 + should run and stop complex daemon [Conformance] + test/e2e/apps/daemon_set.go:205 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:57:32.682 - Jul 29 15:57:32.682: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 15:57:32.684 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:32.72 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:32.725 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 12:04:00.536 + Aug 24 12:04:00.536: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename daemonsets 08/24/23 12:04:00.538 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:00.568 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:00.574 + [BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:207 - STEP: Creating a pod to test emptydir 0666 on node default medium 07/29/23 15:57:32.733 - Jul 29 15:57:32.748: INFO: Waiting up to 5m0s for pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd" in namespace "emptydir-7280" to be "Succeeded or Failed" - Jul 29 15:57:32.765: INFO: Pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd": Phase="Pending", Reason="", readiness=false. Elapsed: 16.558393ms - Jul 29 15:57:34.773: INFO: Pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024955935s - Jul 29 15:57:36.776: INFO: Pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02798292s - STEP: Saw pod success 07/29/23 15:57:36.777 - Jul 29 15:57:36.777: INFO: Pod "pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd" satisfied condition "Succeeded or Failed" - Jul 29 15:57:36.785: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd container test-container: - STEP: delete the pod 07/29/23 15:57:36.809 - Jul 29 15:57:36.840: INFO: Waiting for pod pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd to disappear - Jul 29 15:57:36.846: INFO: Pod pod-63cdcf0e-0988-4f3d-b44b-263927e55dfd no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 + [It] should run and stop complex daemon [Conformance] + test/e2e/apps/daemon_set.go:205 + Aug 24 12:04:00.625: INFO: Creating daemon "daemon-set" with a node selector + STEP: Initially, daemon pods should not be running on any nodes. 08/24/23 12:04:00.634 + Aug 24 12:04:00.640: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:00.640: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set + STEP: Change node label to blue, check that daemon pod is launched. 08/24/23 12:04:00.64 + Aug 24 12:04:00.681: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:00.681: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:04:01.690: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:01.690: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:04:02.692: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:02.692: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:04:03.690: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 + Aug 24 12:04:03.690: INFO: Number of running nodes: 1, number of available pods: 1 in daemonset daemon-set + STEP: Update the node label to green, and wait for daemons to be unscheduled 08/24/23 12:04:03.697 + Aug 24 12:04:03.731: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 + Aug 24 12:04:03.731: INFO: Number of running nodes: 0, number of available pods: 1 in daemonset daemon-set + Aug 24 12:04:04.740: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:04.740: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set + STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate 08/24/23 12:04:04.74 + Aug 24 12:04:04.771: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:04.771: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:04:05.779: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:05.779: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:04:06.783: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:06.783: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:04:07.780: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:07.780: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:04:08.780: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 + Aug 24 12:04:08.780: INFO: 
Number of running nodes: 1, number of available pods: 1 in daemonset daemon-set + [AfterEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:122 + STEP: Deleting DaemonSet "daemon-set" 08/24/23 12:04:08.795 + STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-2169, will wait for the garbage collector to delete the pods 08/24/23 12:04:08.796 + Aug 24 12:04:08.866: INFO: Deleting DaemonSet.extensions daemon-set took: 12.602697ms + Aug 24 12:04:08.967: INFO: Terminating DaemonSet.extensions daemon-set pods took: 101.099951ms + Aug 24 12:04:11.481: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:04:11.481: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set + Aug 24 12:04:11.488: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"10987"},"items":null} + + Aug 24 12:04:11.494: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"10987"},"items":null} + + [AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 15:57:36.846: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 12:04:11.572: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-7280" for this suite. 07/29/23 15:57:36.86 + STEP: Destroying namespace "daemonsets-2169" for this suite. 
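
For reference, the "change its update strategy to RollingUpdate" step in the captured output corresponds to a patch of spec.updateStrategy; a hedged sketch with kubectl (the field path is the real apps/v1 one, but the surrounding object here is the test's own generated DaemonSet):

    kubectl -n daemonsets-2169 patch daemonset daemon-set --type merge \
      -p '{"spec":{"updateStrategy":{"type":"RollingUpdate"}}}'
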
08/24/23 12:04:11.581 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - should be able to convert a non homogeneous list of CRs [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:184 -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +[sig-network] Networking Granular Checks: Pods + should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:105 +[BeforeEach] [sig-network] Networking set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:57:36.878 -Jul 29 15:57:36.878: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-webhook 07/29/23 15:57:36.88 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:36.919 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:36.942 -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:04:11.597 +Aug 24 12:04:11.597: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pod-network-test 08/24/23 12:04:11.599 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:11.641 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:11.646 +[BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:128 -STEP: Setting up server cert 07/29/23 15:57:36.948 -STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication 07/29/23 15:57:37.681 -STEP: Deploying the custom resource conversion webhook pod 07/29/23 15:57:37.706 -STEP: Wait for the deployment to be ready 07/29/23 15:57:37.733 -Jul 29 15:57:37.750: INFO: new replicaset for deployment "sample-crd-conversion-webhook-deployment" is yet to be created -STEP: Deploying the webhook service 07/29/23 15:57:39.77 -STEP: Verifying the service has paired with the endpoint 07/29/23 15:57:39.792 -Jul 29 15:57:40.793: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 -[It] should be able to convert a non homogeneous list of CRs [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:184 -Jul 29 15:57:40.805: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Creating a v1 custom resource 07/29/23 15:57:43.634 -STEP: Create a v2 custom resource 07/29/23 15:57:43.674 -STEP: List CRs in v1 07/29/23 15:57:43.69 -STEP: List CRs in v2 07/29/23 15:57:43.703 -[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +[It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:105 +STEP: Performing setup for networking test in namespace pod-network-test-818 08/24/23 12:04:11.652 +STEP: creating a selector 08/24/23 12:04:11.653 +STEP: Creating the service pods in kubernetes 08/24/23 12:04:11.653 +Aug 24 12:04:11.653: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Aug 24 12:04:11.713: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace 
"pod-network-test-818" to be "running and ready" +Aug 24 12:04:11.720: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 6.992813ms +Aug 24 12:04:11.721: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:04:13.729: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.016074344s +Aug 24 12:04:13.729: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 12:04:15.729: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.016243222s +Aug 24 12:04:15.730: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 12:04:17.730: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.016556208s +Aug 24 12:04:17.730: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 12:04:19.731: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.018140404s +Aug 24 12:04:19.731: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 12:04:21.728: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.015181878s +Aug 24 12:04:21.729: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 12:04:23.730: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 12.017018802s +Aug 24 12:04:23.730: INFO: The phase of Pod netserver-0 is Running (Ready = true) +Aug 24 12:04:23.730: INFO: Pod "netserver-0" satisfied condition "running and ready" +Aug 24 12:04:23.735: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-818" to be "running and ready" +Aug 24 12:04:23.740: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 5.222563ms +Aug 24 12:04:23.740: INFO: The phase of Pod netserver-1 is Running (Ready = true) +Aug 24 12:04:23.741: INFO: Pod "netserver-1" satisfied condition "running and ready" +Aug 24 12:04:23.746: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-818" to be "running and ready" +Aug 24 12:04:23.752: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 6.130213ms +Aug 24 12:04:23.752: INFO: The phase of Pod netserver-2 is Running (Ready = true) +Aug 24 12:04:23.752: INFO: Pod "netserver-2" satisfied condition "running and ready" +STEP: Creating test pods 08/24/23 12:04:23.758 +Aug 24 12:04:23.780: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-818" to be "running" +Aug 24 12:04:23.786: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 5.481253ms +Aug 24 12:04:25.795: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.014726039s +Aug 24 12:04:25.796: INFO: Pod "test-container-pod" satisfied condition "running" +Aug 24 12:04:25.803: INFO: Waiting up to 5m0s for pod "host-test-container-pod" in namespace "pod-network-test-818" to be "running" +Aug 24 12:04:25.811: INFO: Pod "host-test-container-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.557919ms +Aug 24 12:04:25.811: INFO: Pod "host-test-container-pod" satisfied condition "running" +Aug 24 12:04:25.819: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 +Aug 24 12:04:25.819: INFO: Going to poll 10.233.64.193 on port 8083 at least 0 times, with a maximum of 39 tries before failing +Aug 24 12:04:25.827: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.64.193:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-818 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:04:25.828: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:04:25.830: INFO: ExecWithOptions: Clientset creation +Aug 24 12:04:25.830: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-818/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.64.193%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) +Aug 24 12:04:26.012: INFO: Found all 1 expected endpoints: [netserver-0] +Aug 24 12:04:26.012: INFO: Going to poll 10.233.65.33 on port 8083 at least 0 times, with a maximum of 39 tries before failing +Aug 24 12:04:26.019: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.65.33:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-818 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:04:26.019: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:04:26.020: INFO: ExecWithOptions: Clientset creation +Aug 24 12:04:26.021: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-818/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.65.33%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) +Aug 24 12:04:26.178: INFO: Found all 1 expected endpoints: [netserver-1] +Aug 24 12:04:26.178: INFO: Going to poll 10.233.66.52 on port 8083 at least 0 times, with a maximum of 39 tries before failing +Aug 24 12:04:26.186: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.66.52:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-818 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:04:26.186: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:04:26.188: INFO: ExecWithOptions: Clientset creation +Aug 24 12:04:26.188: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-818/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.66.52%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) +Aug 24 12:04:26.304: INFO: Found all 1 expected endpoints: [netserver-2] +[AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 -Jul 29 
15:57:44.526: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:139 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +Aug 24 12:04:26.305: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Networking test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 -STEP: Destroying namespace "crd-webhook-5002" for this suite. 07/29/23 15:57:44.67 +STEP: Destroying namespace "pod-network-test-818" for this suite. 08/24/23 12:04:26.317 ------------------------------ -• [SLOW TEST] [7.838 seconds] -[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should be able to convert a non homogeneous list of CRs [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:184 +• [SLOW TEST] [14.738 seconds] +[sig-network] Networking +test/e2e/common/network/framework.go:23 + Granular Checks: Pods + test/e2e/common/network/networking.go:32 + should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:105 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-network] Networking set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:57:36.878 - Jul 29 15:57:36.878: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-webhook 07/29/23 15:57:36.88 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:36.919 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:36.942 - [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:04:11.597 + Aug 24 12:04:11.597: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pod-network-test 08/24/23 12:04:11.599 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:11.641 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:11.646 + [BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:128 - STEP: Setting up server cert 07/29/23 15:57:36.948 - STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication 07/29/23 15:57:37.681 - STEP: Deploying the custom resource conversion webhook pod 07/29/23 15:57:37.706 - STEP: Wait for the deployment to be ready 07/29/23 15:57:37.733 - Jul 29 15:57:37.750: INFO: new replicaset for deployment "sample-crd-conversion-webhook-deployment" is yet to be created - STEP: Deploying the webhook service 07/29/23 15:57:39.77 - STEP: Verifying the service has paired with the endpoint 
07/29/23 15:57:39.792 - Jul 29 15:57:40.793: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 - [It] should be able to convert a non homogeneous list of CRs [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:184 - Jul 29 15:57:40.805: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Creating a v1 custom resource 07/29/23 15:57:43.634 - STEP: Create a v2 custom resource 07/29/23 15:57:43.674 - STEP: List CRs in v1 07/29/23 15:57:43.69 - STEP: List CRs in v2 07/29/23 15:57:43.703 - [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + [It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:105 + STEP: Performing setup for networking test in namespace pod-network-test-818 08/24/23 12:04:11.652 + STEP: creating a selector 08/24/23 12:04:11.653 + STEP: Creating the service pods in kubernetes 08/24/23 12:04:11.653 + Aug 24 12:04:11.653: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable + Aug 24 12:04:11.713: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-818" to be "running and ready" + Aug 24 12:04:11.720: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 6.992813ms + Aug 24 12:04:11.721: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:04:13.729: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.016074344s + Aug 24 12:04:13.729: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 12:04:15.729: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.016243222s + Aug 24 12:04:15.730: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 12:04:17.730: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.016556208s + Aug 24 12:04:17.730: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 12:04:19.731: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.018140404s + Aug 24 12:04:19.731: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 12:04:21.728: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.015181878s + Aug 24 12:04:21.729: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 12:04:23.730: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 12.017018802s + Aug 24 12:04:23.730: INFO: The phase of Pod netserver-0 is Running (Ready = true) + Aug 24 12:04:23.730: INFO: Pod "netserver-0" satisfied condition "running and ready" + Aug 24 12:04:23.735: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-818" to be "running and ready" + Aug 24 12:04:23.740: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 5.222563ms + Aug 24 12:04:23.740: INFO: The phase of Pod netserver-1 is Running (Ready = true) + Aug 24 12:04:23.741: INFO: Pod "netserver-1" satisfied condition "running and ready" + Aug 24 12:04:23.746: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-818" to be "running and ready" + Aug 24 12:04:23.752: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 6.130213ms + Aug 24 12:04:23.752: INFO: The phase of Pod netserver-2 is Running (Ready = true) + Aug 24 12:04:23.752: INFO: Pod "netserver-2" satisfied condition "running and ready" + STEP: Creating test pods 08/24/23 12:04:23.758 + Aug 24 12:04:23.780: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-818" to be "running" + Aug 24 12:04:23.786: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 5.481253ms + Aug 24 12:04:25.795: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.014726039s + Aug 24 12:04:25.796: INFO: Pod "test-container-pod" satisfied condition "running" + Aug 24 12:04:25.803: INFO: Waiting up to 5m0s for pod "host-test-container-pod" in namespace "pod-network-test-818" to be "running" + Aug 24 12:04:25.811: INFO: Pod "host-test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 7.557919ms + Aug 24 12:04:25.811: INFO: Pod "host-test-container-pod" satisfied condition "running" + Aug 24 12:04:25.819: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 + Aug 24 12:04:25.819: INFO: Going to poll 10.233.64.193 on port 8083 at least 0 times, with a maximum of 39 tries before failing + Aug 24 12:04:25.827: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.64.193:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-818 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:04:25.828: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:04:25.830: INFO: ExecWithOptions: Clientset creation + Aug 24 12:04:25.830: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-818/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.64.193%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) + Aug 24 12:04:26.012: INFO: Found all 1 expected endpoints: [netserver-0] + Aug 24 12:04:26.012: INFO: Going to poll 10.233.65.33 on port 8083 at least 0 times, with a maximum of 39 tries before failing + Aug 24 12:04:26.019: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.65.33:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-818 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:04:26.019: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:04:26.020: INFO: ExecWithOptions: Clientset creation + Aug 24 12:04:26.021: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-818/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.65.33%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) + Aug 24 12:04:26.178: INFO: Found all 1 expected endpoints: [netserver-1] + Aug 24 12:04:26.178: INFO: Going to poll 10.233.66.52 on port 8083 at least 0 times, with a maximum of 39 tries before failing + Aug 24 12:04:26.186: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 
--connect-timeout 1 http://10.233.66.52:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-818 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:04:26.186: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:04:26.188: INFO: ExecWithOptions: Clientset creation + Aug 24 12:04:26.188: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-818/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.66.52%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) + Aug 24 12:04:26.304: INFO: Found all 1 expected endpoints: [netserver-2] + [AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 - Jul 29 15:57:44.526: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:139 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + Aug 24 12:04:26.305: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Networking test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 - STEP: Destroying namespace "crd-webhook-5002" for this suite. 07/29/23 15:57:44.67 + STEP: Destroying namespace "pod-network-test-818" for this suite. 08/24/23 12:04:26.317 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-node] Probing container - should have monotonically increasing restart count [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:199 -[BeforeEach] [sig-node] Probing container +[sig-scheduling] LimitRange + should create a LimitRange with defaults and ensure pod has those defaults applied. 
[Conformance] + test/e2e/scheduling/limit_range.go:61 +[BeforeEach] [sig-scheduling] LimitRange set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 15:57:44.725 -Jul 29 15:57:44.725: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-probe 07/29/23 15:57:44.739 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:44.786 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:44.791 -[BeforeEach] [sig-node] Probing container +STEP: Creating a kubernetes client 08/24/23 12:04:26.339 +Aug 24 12:04:26.340: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename limitrange 08/24/23 12:04:26.342 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:26.377 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:26.387 +[BeforeEach] [sig-scheduling] LimitRange test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 -[It] should have monotonically increasing restart count [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:199 -STEP: Creating pod liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 in namespace container-probe-7510 07/29/23 15:57:44.801 -Jul 29 15:57:44.830: INFO: Waiting up to 5m0s for pod "liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762" in namespace "container-probe-7510" to be "not pending" -Jul 29 15:57:44.839: INFO: Pod "liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762": Phase="Pending", Reason="", readiness=false. Elapsed: 8.701141ms -Jul 29 15:57:46.849: INFO: Pod "liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762": Phase="Running", Reason="", readiness=true. Elapsed: 2.019068462s -Jul 29 15:57:46.850: INFO: Pod "liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762" satisfied condition "not pending" -Jul 29 15:57:46.850: INFO: Started pod liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 in namespace container-probe-7510 -STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 15:57:46.85 -Jul 29 15:57:46.858: INFO: Initial restart count of pod liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is 0 -Jul 29 15:58:06.981: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 1 (20.122284151s elapsed) -Jul 29 15:58:27.077: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 2 (40.218345019s elapsed) -Jul 29 15:58:47.183: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 3 (1m0.324218993s elapsed) -Jul 29 15:59:07.294: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 4 (1m20.435014119s elapsed) -Jul 29 16:00:07.585: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 5 (2m20.72599516s elapsed) -STEP: deleting the pod 07/29/23 16:00:07.585 -[AfterEach] [sig-node] Probing container +[It] should create a LimitRange with defaults and ensure pod has those defaults applied. 
[Conformance] + test/e2e/scheduling/limit_range.go:61 +STEP: Creating a LimitRange 08/24/23 12:04:26.395 +STEP: Setting up watch 08/24/23 12:04:26.396 +STEP: Submitting a LimitRange 08/24/23 12:04:26.504 +STEP: Verifying LimitRange creation was observed 08/24/23 12:04:26.517 +STEP: Fetching the LimitRange to ensure it has proper values 08/24/23 12:04:26.518 +Aug 24 12:04:26.527: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] +Aug 24 12:04:26.527: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] +STEP: Creating a Pod with no resource requirements 08/24/23 12:04:26.527 +STEP: Ensuring Pod has resource requirements applied from LimitRange 08/24/23 12:04:26.537 +Aug 24 12:04:26.543: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] +Aug 24 12:04:26.543: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] +STEP: Creating a Pod with partial resource requirements 08/24/23 12:04:26.544 +STEP: Ensuring Pod has merged resource requirements applied from LimitRange 08/24/23 12:04:26.555 +Aug 24 12:04:26.566: INFO: Verifying requests: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] +Aug 24 12:04:26.566: INFO: Verifying limits: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] +STEP: Failing to create a Pod with less than min resources 08/24/23 12:04:26.567 +STEP: Failing to create a Pod with more than max resources 08/24/23 12:04:26.575 +STEP: Updating a LimitRange 08/24/23 12:04:26.582 +STEP: Verifying LimitRange updating is effective 08/24/23 12:04:26.6 +STEP: Creating a Pod with less than former min resources 08/24/23 12:04:28.611 +STEP: Failing to create a Pod with more than max resources 08/24/23 12:04:28.627 +STEP: Deleting a LimitRange 08/24/23 12:04:28.632 +STEP: Verifying the LimitRange was deleted 08/24/23 12:04:28.644 +Aug 24 12:04:33.652: INFO: limitRange is already deleted +STEP: Creating a Pod with more than former max resources 08/24/23 12:04:33.652 +[AfterEach] [sig-scheduling] LimitRange test/e2e/framework/node/init/init.go:32 -Jul 29 16:00:07.609: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup 
(Each)] [sig-node] Probing container +Aug 24 12:04:33.669: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-scheduling] LimitRange test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Probing container +[DeferCleanup (Each)] [sig-scheduling] LimitRange dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Probing container +[DeferCleanup (Each)] [sig-scheduling] LimitRange tear down framework | framework.go:193 -STEP: Destroying namespace "container-probe-7510" for this suite. 07/29/23 16:00:07.628 +STEP: Destroying namespace "limitrange-284" for this suite. 08/24/23 12:04:33.684 ------------------------------ -• [SLOW TEST] [142.933 seconds] -[sig-node] Probing container -test/e2e/common/node/framework.go:23 - should have monotonically increasing restart count [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:199 +• [SLOW TEST] [7.357 seconds] +[sig-scheduling] LimitRange +test/e2e/scheduling/framework.go:40 + should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] + test/e2e/scheduling/limit_range.go:61 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Probing container + [BeforeEach] [sig-scheduling] LimitRange set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 15:57:44.725 - Jul 29 15:57:44.725: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-probe 07/29/23 15:57:44.739 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 15:57:44.786 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 15:57:44.791 - [BeforeEach] [sig-node] Probing container + STEP: Creating a kubernetes client 08/24/23 12:04:26.339 + Aug 24 12:04:26.340: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename limitrange 08/24/23 12:04:26.342 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:26.377 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:26.387 + [BeforeEach] [sig-scheduling] LimitRange test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 - [It] should have monotonically increasing restart count [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:199 - STEP: Creating pod liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 in namespace container-probe-7510 07/29/23 15:57:44.801 - Jul 29 15:57:44.830: INFO: Waiting up to 5m0s for pod "liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762" in namespace "container-probe-7510" to be "not pending" - Jul 29 15:57:44.839: INFO: Pod "liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762": Phase="Pending", Reason="", readiness=false. Elapsed: 8.701141ms - Jul 29 15:57:46.849: INFO: Pod "liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.019068462s - Jul 29 15:57:46.850: INFO: Pod "liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762" satisfied condition "not pending" - Jul 29 15:57:46.850: INFO: Started pod liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 in namespace container-probe-7510 - STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 15:57:46.85 - Jul 29 15:57:46.858: INFO: Initial restart count of pod liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is 0 - Jul 29 15:58:06.981: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 1 (20.122284151s elapsed) - Jul 29 15:58:27.077: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 2 (40.218345019s elapsed) - Jul 29 15:58:47.183: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 3 (1m0.324218993s elapsed) - Jul 29 15:59:07.294: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 4 (1m20.435014119s elapsed) - Jul 29 16:00:07.585: INFO: Restart count of pod container-probe-7510/liveness-9cef85dd-4ea0-42e3-87d0-1a7264ae8762 is now 5 (2m20.72599516s elapsed) - STEP: deleting the pod 07/29/23 16:00:07.585 - [AfterEach] [sig-node] Probing container + [It] should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] + test/e2e/scheduling/limit_range.go:61 + STEP: Creating a LimitRange 08/24/23 12:04:26.395 + STEP: Setting up watch 08/24/23 12:04:26.396 + STEP: Submitting a LimitRange 08/24/23 12:04:26.504 + STEP: Verifying LimitRange creation was observed 08/24/23 12:04:26.517 + STEP: Fetching the LimitRange to ensure it has proper values 08/24/23 12:04:26.518 + Aug 24 12:04:26.527: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] + Aug 24 12:04:26.527: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] + STEP: Creating a Pod with no resource requirements 08/24/23 12:04:26.527 + STEP: Ensuring Pod has resource requirements applied from LimitRange 08/24/23 12:04:26.537 + Aug 24 12:04:26.543: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] + Aug 24 12:04:26.543: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] + STEP: Creating a Pod with partial resource requirements 08/24/23 12:04:26.544 + STEP: Ensuring Pod has merged resource requirements applied from LimitRange 08/24/23 12:04:26.555 + Aug 24 12:04:26.566: INFO: Verifying requests: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 
150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] + Aug 24 12:04:26.566: INFO: Verifying limits: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] + STEP: Failing to create a Pod with less than min resources 08/24/23 12:04:26.567 + STEP: Failing to create a Pod with more than max resources 08/24/23 12:04:26.575 + STEP: Updating a LimitRange 08/24/23 12:04:26.582 + STEP: Verifying LimitRange updating is effective 08/24/23 12:04:26.6 + STEP: Creating a Pod with less than former min resources 08/24/23 12:04:28.611 + STEP: Failing to create a Pod with more than max resources 08/24/23 12:04:28.627 + STEP: Deleting a LimitRange 08/24/23 12:04:28.632 + STEP: Verifying the LimitRange was deleted 08/24/23 12:04:28.644 + Aug 24 12:04:33.652: INFO: limitRange is already deleted + STEP: Creating a Pod with more than former max resources 08/24/23 12:04:33.652 + [AfterEach] [sig-scheduling] LimitRange test/e2e/framework/node/init/init.go:32 - Jul 29 16:00:07.609: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Probing container + Aug 24 12:04:33.669: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-scheduling] LimitRange test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] [sig-scheduling] LimitRange dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] [sig-scheduling] LimitRange tear down framework | framework.go:193 - STEP: Destroying namespace "container-probe-7510" for this suite. 07/29/23 16:00:07.628 + STEP: Destroying namespace "limitrange-284" for this suite. 
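
The raw byte counts in the verification lines above decode to round units: 209715200 is 200Mi and 214748364800 is 200Gi (the request defaults), while 524288000 is 500Mi and 536870912000 is 500Gi (the limit defaults). A sketch of a LimitRange carrying those defaults, under a hypothetical object name (the test constructs its own, and also sets min/max, omitted here):

    # Keep "EOF" at column 0 when pasting; the indentation here is for display.
    kubectl -n limitrange-284 apply -f - <<EOF
    apiVersion: v1
    kind: LimitRange
    metadata:
      name: limitrange-defaults-demo
    spec:
      limits:
      - type: Container
        defaultRequest:            # applied to containers that omit requests
          cpu: 100m
          memory: 200Mi
          ephemeral-storage: 200Gi
        default:                   # applied to containers that omit limits
          cpu: 500m
          memory: 500Mi
          ephemeral-storage: 500Gi
    EOF
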
08/24/23 12:04:33.684 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSS ------------------------------ -[sig-storage] Projected secret - should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:67 -[BeforeEach] [sig-storage] Projected secret +[sig-storage] Downward API volume + should provide podname only [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:53 +[BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:00:07.66 -Jul 29 16:00:07.660: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:00:07.665 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:07.727 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:07.733 -[BeforeEach] [sig-storage] Projected secret +STEP: Creating a kubernetes client 08/24/23 12:04:33.702 +Aug 24 12:04:33.703: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:04:33.706 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:33.736 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:33.742 +[BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:67 -STEP: Creating projection with secret that has name projected-secret-test-03230f9b-39dd-4a75-b79f-525ba9669ffc 07/29/23 16:00:07.739 -STEP: Creating a pod to test consume secrets 07/29/23 16:00:07.748 -Jul 29 16:00:07.769: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174" in namespace "projected-4360" to be "Succeeded or Failed" -Jul 29 16:00:07.825: INFO: Pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174": Phase="Pending", Reason="", readiness=false. Elapsed: 55.559353ms -Jul 29 16:00:09.832: INFO: Pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174": Phase="Pending", Reason="", readiness=false. Elapsed: 2.063130507s -Jul 29 16:00:11.838: INFO: Pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.068801627s -STEP: Saw pod success 07/29/23 16:00:11.838 -Jul 29 16:00:11.839: INFO: Pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174" satisfied condition "Succeeded or Failed" -Jul 29 16:00:11.844: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174 container projected-secret-volume-test: -STEP: delete the pod 07/29/23 16:00:11.873 -Jul 29 16:00:11.938: INFO: Waiting for pod pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174 to disappear -Jul 29 16:00:11.944: INFO: Pod pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174 no longer exists -[AfterEach] [sig-storage] Projected secret +[BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 +[It] should provide podname only [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:53 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:04:33.748 +Aug 24 12:04:33.765: INFO: Waiting up to 5m0s for pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb" in namespace "downward-api-5948" to be "Succeeded or Failed" +Aug 24 12:04:33.774: INFO: Pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb": Phase="Pending", Reason="", readiness=false. Elapsed: 9.356768ms +Aug 24 12:04:35.782: INFO: Pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017725016s +Aug 24 12:04:37.787: INFO: Pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021804014s +STEP: Saw pod success 08/24/23 12:04:37.787 +Aug 24 12:04:37.788: INFO: Pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb" satisfied condition "Succeeded or Failed" +Aug 24 12:04:37.793: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb container client-container: +STEP: delete the pod 08/24/23 12:04:37.805 +Aug 24 12:04:37.830: INFO: Waiting for pod downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb to disappear +Aug 24 12:04:37.843: INFO: Pod downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb no longer exists +[AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 -Jul 29 16:00:11.944: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected secret +Aug 24 12:04:37.844: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 -STEP: Destroying namespace "projected-4360" for this suite. 07/29/23 16:00:11.955 +STEP: Destroying namespace "downward-api-5948" for this suite. 
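
The "podname only" case boils down to a downwardAPI volume that projects metadata.name into a file, which the client container prints and the framework then compares against the pod's name. A minimal sketch under assumed names, image, and file path (the real pod is generated by the framework):

    # Keep "EOF" at column 0 when pasting; the indentation here is for display.
    kubectl -n downward-api-5948 apply -f - <<EOF
    apiVersion: v1
    kind: Pod
    metadata:
      name: downwardapi-podname-demo
    spec:
      restartPolicy: Never
      containers:
      - name: client-container
        image: busybox:1.36
        command: ["sh", "-c", "cat /etc/podinfo/podname"]
        volumeMounts:
        - name: podinfo
          mountPath: /etc/podinfo
      volumes:
      - name: podinfo
        downwardAPI:
          items:
          - path: podname
            fieldRef:
              fieldPath: metadata.name
    EOF
    # Once the pod has succeeded, its log is its own name.
    kubectl -n downward-api-5948 logs downwardapi-podname-demo
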
08/24/23 12:04:37.856
------------------------------
-• [4.312 seconds]
-[sig-storage] Projected secret
+• [4.171 seconds]
+[sig-storage] Downward API volume
test/e2e/common/storage/framework.go:23
- should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/projected_secret.go:67
+ should provide podname only [NodeConformance] [Conformance]
+ test/e2e/common/storage/downwardapi_volume.go:53
Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-storage] Projected secret
+ [BeforeEach] [sig-storage] Downward API volume
set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:00:07.66
- Jul 29 16:00:07.660: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename projected 07/29/23 16:00:07.665
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:07.727
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:07.733
- [BeforeEach] [sig-storage] Projected secret
+ STEP: Creating a kubernetes client 08/24/23 12:04:33.702
+ Aug 24 12:04:33.703: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename downward-api 08/24/23 12:04:33.706
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:33.736
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:33.742
+ [BeforeEach] [sig-storage] Downward API volume
test/e2e/framework/metrics/init/init.go:31
- [It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/projected_secret.go:67
- STEP: Creating projection with secret that has name projected-secret-test-03230f9b-39dd-4a75-b79f-525ba9669ffc 07/29/23 16:00:07.739
- STEP: Creating a pod to test consume secrets 07/29/23 16:00:07.748
- Jul 29 16:00:07.769: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174" in namespace "projected-4360" to be "Succeeded or Failed"
- Jul 29 16:00:07.825: INFO: Pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174": Phase="Pending", Reason="", readiness=false. Elapsed: 55.559353ms
- Jul 29 16:00:09.832: INFO: Pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174": Phase="Pending", Reason="", readiness=false. Elapsed: 2.063130507s
- Jul 29 16:00:11.838: INFO: Pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.068801627s
- STEP: Saw pod success 07/29/23 16:00:11.838
- Jul 29 16:00:11.839: INFO: Pod "pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174" satisfied condition "Succeeded or Failed"
- Jul 29 16:00:11.844: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174 container projected-secret-volume-test:
- STEP: delete the pod 07/29/23 16:00:11.873
- Jul 29 16:00:11.938: INFO: Waiting for pod pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174 to disappear
- Jul 29 16:00:11.944: INFO: Pod pod-projected-secrets-6865546f-f2dc-49dc-953d-078da0449174 no longer exists
- [AfterEach] [sig-storage] Projected secret
+ [BeforeEach] [sig-storage] Downward API volume
+ test/e2e/common/storage/downwardapi_volume.go:44
+ [It] should provide podname only [NodeConformance] [Conformance]
+ test/e2e/common/storage/downwardapi_volume.go:53
+ STEP: Creating a pod to test downward API volume plugin 08/24/23 12:04:33.748
+ Aug 24 12:04:33.765: INFO: Waiting up to 5m0s for pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb" in namespace "downward-api-5948" to be "Succeeded or Failed"
+ Aug 24 12:04:33.774: INFO: Pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb": Phase="Pending", Reason="", readiness=false. Elapsed: 9.356768ms
+ Aug 24 12:04:35.782: INFO: Pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017725016s
+ Aug 24 12:04:37.787: INFO: Pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021804014s
+ STEP: Saw pod success 08/24/23 12:04:37.787
+ Aug 24 12:04:37.788: INFO: Pod "downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb" satisfied condition "Succeeded or Failed"
+ Aug 24 12:04:37.793: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb container client-container:
+ STEP: delete the pod 08/24/23 12:04:37.805
+ Aug 24 12:04:37.830: INFO: Waiting for pod downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb to disappear
+ Aug 24 12:04:37.843: INFO: Pod downwardapi-volume-f00ba1bc-a5e4-449d-b394-013f45f95dfb no longer exists
+ [AfterEach] [sig-storage] Downward API volume
test/e2e/framework/node/init/init.go:32
- Jul 29 16:00:11.944: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-storage] Projected secret
+ Aug 24 12:04:37.844: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] Downward API volume
test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-storage] Projected secret
+ [DeferCleanup (Each)] [sig-storage] Downward API volume
dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-storage] Projected secret
+ [DeferCleanup (Each)] [sig-storage] Downward API volume
tear down framework | framework.go:193
- STEP: Destroying namespace "projected-4360" for this suite. 07/29/23 16:00:11.955
+ STEP: Destroying namespace "downward-api-5948" for this suite. 08/24/23 12:04:37.856
<< End Captured GinkgoWriter Output
------------------------------
-SSS
+SSSSSSS
------------------------------
-[sig-apps] ReplicationController
- should release no longer matching pods [Conformance]
- test/e2e/apps/rc.go:101
-[BeforeEach] [sig-apps] ReplicationController
+[sig-network] DNS
+ should provide DNS for pods for Hostname [Conformance]
+ test/e2e/network/dns.go:248
+[BeforeEach] [sig-network] DNS
set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:00:11.976
-Jul 29 16:00:11.976: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename replication-controller 07/29/23 16:00:11.979
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:12.013
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:12.017
-[BeforeEach] [sig-apps] ReplicationController
+STEP: Creating a kubernetes client 08/24/23 12:04:37.881
+Aug 24 12:04:37.881: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename dns 08/24/23 12:04:37.883
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:37.923
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:37.929
+[BeforeEach] [sig-network] DNS
test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-apps] ReplicationController
- test/e2e/apps/rc.go:57
-[It] should release no longer matching pods [Conformance]
- test/e2e/apps/rc.go:101
-STEP: Given a ReplicationController is created 07/29/23 16:00:12.021
-STEP: When the matched label of one of its pods change 07/29/23 16:00:12.029
-Jul 29 16:00:12.035: INFO: Pod name pod-release: Found 0 pods out of 1
-Jul 29 16:00:17.046: INFO: Pod name pod-release: Found 1 pods out of 1
-STEP: Then the pod is released 07/29/23 16:00:17.067
-[AfterEach] [sig-apps] ReplicationController
+[It] should provide DNS for pods for Hostname [Conformance]
+ test/e2e/network/dns.go:248
+STEP: Creating a test headless service 08/24/23 12:04:37.938
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9729.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-2.dns-test-service-2.dns-9729.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/wheezy_hosts@dns-querier-2;sleep 1; done
+ 08/24/23 12:04:37.983
+STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9729.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-2.dns-test-service-2.dns-9729.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/jessie_hosts@dns-querier-2;sleep 1; done
+ 08/24/23 12:04:37.983
+STEP: creating a pod to probe DNS 08/24/23 12:04:37.984
+STEP: submitting the pod to kubernetes 08/24/23 12:04:37.984
+Aug 24 12:04:38.011: INFO: Waiting up to 15m0s for pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1" in namespace "dns-9729" to be "running"
+Aug 24 12:04:38.019: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 8.090243ms
+Aug 24 12:04:40.030: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019016863s
+Aug 24 12:04:42.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018142982s
+Aug 24 12:04:44.028: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.016569138s
+Aug 24 12:04:46.027: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 8.015468253s
+Aug 24 12:04:48.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 10.018173038s
+Aug 24 12:04:50.027: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 12.01619479s
+Aug 24 12:04:52.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 14.017392913s
+Aug 24 12:04:54.033: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 16.022158599s
+Aug 24 12:04:56.028: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 18.016404547s
+Aug 24 12:04:58.028: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 20.016865109s
+Aug 24 12:05:00.027: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 22.016198711s
+Aug 24 12:05:02.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Running", Reason="", readiness=true. Elapsed: 24.017481083s
+Aug 24 12:05:02.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1" satisfied condition "running"
+STEP: retrieving the pod 08/24/23 12:05:02.029
+STEP: looking for the results for each expected name from probers 08/24/23 12:05:02.035
+Aug 24 12:05:02.065: INFO: DNS probes using dns-9729/dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1 succeeded
+
+STEP: deleting the pod 08/24/23 12:05:02.065
+STEP: deleting the test headless service 08/24/23 12:05:02.103
+[AfterEach] [sig-network] DNS
test/e2e/framework/node/init/init.go:32
-Jul 29 16:00:18.098: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-apps] ReplicationController
+Aug 24 12:05:02.129: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-network] DNS
test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-apps] ReplicationController
+[DeferCleanup (Each)] [sig-network] DNS
dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-apps] ReplicationController
+[DeferCleanup (Each)] [sig-network] DNS
tear down framework | framework.go:193
-STEP: Destroying namespace "replication-controller-8950" for this suite. 07/29/23 16:00:18.107
+STEP: Destroying namespace "dns-9729" for this suite. 08/24/23 12:05:02.139
------------------------------
-• [SLOW TEST] [6.143 seconds]
-[sig-apps] ReplicationController
-test/e2e/apps/framework.go:23
- should release no longer matching pods [Conformance]
- test/e2e/apps/rc.go:101
+• [SLOW TEST] [24.293 seconds]
+[sig-network] DNS
+test/e2e/network/common/framework.go:23
+ should provide DNS for pods for Hostname [Conformance]
+ test/e2e/network/dns.go:248
Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-apps] ReplicationController
+ [BeforeEach] [sig-network] DNS
set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:00:11.976
- Jul 29 16:00:11.976: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename replication-controller 07/29/23 16:00:11.979
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:12.013
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:12.017
- [BeforeEach] [sig-apps] ReplicationController
+ STEP: Creating a kubernetes client 08/24/23 12:04:37.881
+ Aug 24 12:04:37.881: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename dns 08/24/23 12:04:37.883
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:04:37.923
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:04:37.929
+ [BeforeEach] [sig-network] DNS
test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-apps] ReplicationController
- test/e2e/apps/rc.go:57
- [It] should release no longer matching pods [Conformance]
- test/e2e/apps/rc.go:101
- STEP: Given a ReplicationController is created 07/29/23 16:00:12.021
- STEP: When the matched label of one of its pods change 07/29/23 16:00:12.029
- Jul 29 16:00:12.035: INFO: Pod name pod-release: Found 0 pods out of 1
- Jul 29 16:00:17.046: INFO: Pod name pod-release: Found 1 pods out of 1
- STEP: Then the pod is released 07/29/23 16:00:17.067
- [AfterEach] [sig-apps] ReplicationController
+ [It] should provide DNS for pods for Hostname [Conformance]
+ test/e2e/network/dns.go:248
+ STEP: Creating a test headless service 08/24/23 12:04:37.938
+ STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9729.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-2.dns-test-service-2.dns-9729.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/wheezy_hosts@dns-querier-2;sleep 1; done
+ 08/24/23 12:04:37.983
+ STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-9729.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-2.dns-test-service-2.dns-9729.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/jessie_hosts@dns-querier-2;sleep 1; done
+ 08/24/23 12:04:37.983
+ STEP: creating a pod to probe DNS 08/24/23 12:04:37.984
+ STEP: submitting the pod to kubernetes 08/24/23 12:04:37.984
+ Aug 24 12:04:38.011: INFO: Waiting up to 15m0s for pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1" in namespace "dns-9729" to be "running"
+ Aug 24 12:04:38.019: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 8.090243ms
+ Aug 24 12:04:40.030: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019016863s
+ Aug 24 12:04:42.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018142982s
+ Aug 24 12:04:44.028: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.016569138s
+ Aug 24 12:04:46.027: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 8.015468253s
+ Aug 24 12:04:48.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 10.018173038s
+ Aug 24 12:04:50.027: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 12.01619479s
+ Aug 24 12:04:52.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 14.017392913s
+ Aug 24 12:04:54.033: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 16.022158599s
+ Aug 24 12:04:56.028: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 18.016404547s
+ Aug 24 12:04:58.028: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 20.016865109s
+ Aug 24 12:05:00.027: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Pending", Reason="", readiness=false. Elapsed: 22.016198711s
+ Aug 24 12:05:02.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1": Phase="Running", Reason="", readiness=true. Elapsed: 24.017481083s
+ Aug 24 12:05:02.029: INFO: Pod "dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1" satisfied condition "running"
+ STEP: retrieving the pod 08/24/23 12:05:02.029
+ STEP: looking for the results for each expected name from probers 08/24/23 12:05:02.035
+ Aug 24 12:05:02.065: INFO: DNS probes using dns-9729/dns-test-0d1f5060-6b46-4f2b-9158-4e8cb2a56be1 succeeded
+
+ STEP: deleting the pod 08/24/23 12:05:02.065
+ STEP: deleting the test headless service 08/24/23 12:05:02.103
+ [AfterEach] [sig-network] DNS
test/e2e/framework/node/init/init.go:32
- Jul 29 16:00:18.098: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-apps] ReplicationController
+ Aug 24 12:05:02.129: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-network] DNS
test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-apps] ReplicationController
+ [DeferCleanup (Each)] [sig-network] DNS
dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-apps] ReplicationController
+ [DeferCleanup (Each)] [sig-network] DNS
tear down framework | framework.go:193
- STEP: Destroying namespace "replication-controller-8950" for this suite. 07/29/23 16:00:18.107
+ STEP: Destroying namespace "dns-9729" for this suite. 08/24/23 12:05:02.139
<< End Captured GinkgoWriter Output
------------------------------
-SSSSSSS
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-apps] Daemon set [Serial]
- should retry creating failed daemon pods [Conformance]
- test/e2e/apps/daemon_set.go:305
-[BeforeEach] [sig-apps] Daemon set [Serial]
+[sig-apps] Deployment
+ deployment should support proportional scaling [Conformance]
+ test/e2e/apps/deployment.go:160
+[BeforeEach] [sig-apps] Deployment
set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:00:18.127
-Jul 29 16:00:18.129: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename daemonsets 07/29/23 16:00:18.131
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:18.164
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:18.169
-[BeforeEach] [sig-apps] Daemon set [Serial]
+STEP: Creating a kubernetes client 08/24/23 12:05:02.177
+Aug 24 12:05:02.177: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename deployment 08/24/23 12:05:02.185
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:02.25
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:02.256
+[BeforeEach] [sig-apps] Deployment
test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-apps] Daemon set [Serial]
- test/e2e/apps/daemon_set.go:157
-[It] should retry creating failed daemon pods [Conformance]
- test/e2e/apps/daemon_set.go:305
-STEP: Creating a simple DaemonSet "daemon-set" 07/29/23 16:00:18.241
-STEP: Check that daemon pods launch on every node of the cluster. 07/29/23 16:00:18.254
-Jul 29 16:00:18.274: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
-Jul 29 16:00:18.276: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
-Jul 29 16:00:19.335: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
-Jul 29 16:00:19.335: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
-Jul 29 16:00:20.300: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
-Jul 29 16:00:20.300: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
-STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. 07/29/23 16:00:20.309
-Jul 29 16:00:20.359: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
-Jul 29 16:00:20.359: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
-Jul 29 16:00:21.379: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
-Jul 29 16:00:21.379: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
-Jul 29 16:00:22.379: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
-Jul 29 16:00:22.379: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
-STEP: Wait for the failed daemon pod to be completely deleted. 07/29/23 16:00:22.379
-[AfterEach] [sig-apps] Daemon set [Serial]
- test/e2e/apps/daemon_set.go:122
-STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:00:22.398
-STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-7125, will wait for the garbage collector to delete the pods 07/29/23 16:00:22.398
-Jul 29 16:00:22.468: INFO: Deleting DaemonSet.extensions daemon-set took: 12.738849ms
-Jul 29 16:00:22.569: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.543817ms
-Jul 29 16:00:25.477: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
-Jul 29 16:00:25.477: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set
-Jul 29 16:00:25.482: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"13235"},"items":null}
-
-Jul 29 16:00:25.486: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"13235"},"items":null}
-
-[AfterEach] [sig-apps] Daemon set [Serial]
+[BeforeEach] [sig-apps] Deployment
+ test/e2e/apps/deployment.go:91
+[It] deployment should support proportional scaling [Conformance]
+ test/e2e/apps/deployment.go:160
+Aug 24 12:05:02.262: INFO: Creating deployment "webserver-deployment"
+Aug 24 12:05:02.271: INFO: Waiting for observed generation 1
+Aug 24 12:05:04.339: INFO: Waiting for all required pods to come up
+Aug 24 12:05:04.355: INFO: Pod name httpd: Found 10 pods out of 10
+STEP: ensuring each pod is running 08/24/23 12:05:04.355
+Aug 24 12:05:04.355: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-5rsbd" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.356: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-59lhj" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.356: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-6lh4w" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.356: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-vc7xn" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-l2l4z" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-92c7g" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-h7szf" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-nr2hf" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-qkvvf" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-qx8n4" in namespace "deployment-6551" to be "running"
+Aug 24 12:05:04.369: INFO: Pod "webserver-deployment-7f5969cbc7-5rsbd": Phase="Pending", Reason="", readiness=false. Elapsed: 13.291498ms
+Aug 24 12:05:04.369: INFO: Pod "webserver-deployment-7f5969cbc7-59lhj": Phase="Pending", Reason="", readiness=false. Elapsed: 13.055108ms
+Aug 24 12:05:04.372: INFO: Pod "webserver-deployment-7f5969cbc7-qkvvf": Phase="Pending", Reason="", readiness=false. Elapsed: 13.577046ms
+Aug 24 12:05:04.372: INFO: Pod "webserver-deployment-7f5969cbc7-h7szf": Phase="Pending", Reason="", readiness=false. Elapsed: 14.724481ms
+Aug 24 12:05:04.373: INFO: Pod "webserver-deployment-7f5969cbc7-92c7g": Phase="Pending", Reason="", readiness=false. Elapsed: 15.26906ms
+Aug 24 12:05:04.373: INFO: Pod "webserver-deployment-7f5969cbc7-vc7xn": Phase="Pending", Reason="", readiness=false. Elapsed: 16.011588ms
+Aug 24 12:05:04.373: INFO: Pod "webserver-deployment-7f5969cbc7-nr2hf": Phase="Pending", Reason="", readiness=false. Elapsed: 14.943674ms
+Aug 24 12:05:04.373: INFO: Pod "webserver-deployment-7f5969cbc7-6lh4w": Phase="Pending", Reason="", readiness=false. Elapsed: 17.155825ms
+Aug 24 12:05:04.376: INFO: Pod "webserver-deployment-7f5969cbc7-qx8n4": Phase="Pending", Reason="", readiness=false. Elapsed: 16.379466ms
+Aug 24 12:05:04.378: INFO: Pod "webserver-deployment-7f5969cbc7-l2l4z": Phase="Pending", Reason="", readiness=false. Elapsed: 20.606816ms
+Aug 24 12:05:06.378: INFO: Pod "webserver-deployment-7f5969cbc7-5rsbd": Phase="Running", Reason="", readiness=true. Elapsed: 2.02270839s
+Aug 24 12:05:06.379: INFO: Pod "webserver-deployment-7f5969cbc7-5rsbd" satisfied condition "running"
+Aug 24 12:05:06.379: INFO: Pod "webserver-deployment-7f5969cbc7-59lhj": Phase="Running", Reason="", readiness=true. Elapsed: 2.022784921s
+Aug 24 12:05:06.379: INFO: Pod "webserver-deployment-7f5969cbc7-59lhj" satisfied condition "running"
+Aug 24 12:05:06.384: INFO: Pod "webserver-deployment-7f5969cbc7-nr2hf": Phase="Running", Reason="", readiness=true. Elapsed: 2.025602468s
+Aug 24 12:05:06.384: INFO: Pod "webserver-deployment-7f5969cbc7-nr2hf" satisfied condition "running"
+Aug 24 12:05:06.384: INFO: Pod "webserver-deployment-7f5969cbc7-vc7xn": Phase="Running", Reason="", readiness=true. Elapsed: 2.027336512s
+Aug 24 12:05:06.385: INFO: Pod "webserver-deployment-7f5969cbc7-vc7xn" satisfied condition "running"
+Aug 24 12:05:06.385: INFO: Pod "webserver-deployment-7f5969cbc7-6lh4w": Phase="Running", Reason="", readiness=true. Elapsed: 2.028936045s
+Aug 24 12:05:06.385: INFO: Pod "webserver-deployment-7f5969cbc7-6lh4w" satisfied condition "running"
+Aug 24 12:05:06.388: INFO: Pod "webserver-deployment-7f5969cbc7-92c7g": Phase="Running", Reason="", readiness=true. Elapsed: 2.030065101s
+Aug 24 12:05:06.388: INFO: Pod "webserver-deployment-7f5969cbc7-92c7g" satisfied condition "running"
+Aug 24 12:05:06.390: INFO: Pod "webserver-deployment-7f5969cbc7-qkvvf": Phase="Running", Reason="", readiness=true. Elapsed: 2.031347373s
+Aug 24 12:05:06.390: INFO: Pod "webserver-deployment-7f5969cbc7-qkvvf" satisfied condition "running"
+Aug 24 12:05:06.390: INFO: Pod "webserver-deployment-7f5969cbc7-qx8n4": Phase="Running", Reason="", readiness=true. Elapsed: 2.031278272s
+Aug 24 12:05:06.390: INFO: Pod "webserver-deployment-7f5969cbc7-qx8n4" satisfied condition "running"
+Aug 24 12:05:06.391: INFO: Pod "webserver-deployment-7f5969cbc7-l2l4z": Phase="Running", Reason="", readiness=true. Elapsed: 2.033725408s
+Aug 24 12:05:06.391: INFO: Pod "webserver-deployment-7f5969cbc7-l2l4z" satisfied condition "running"
+Aug 24 12:05:06.392: INFO: Pod "webserver-deployment-7f5969cbc7-h7szf": Phase="Running", Reason="", readiness=true. Elapsed: 2.034295724s
+Aug 24 12:05:06.392: INFO: Pod "webserver-deployment-7f5969cbc7-h7szf" satisfied condition "running"
+Aug 24 12:05:06.392: INFO: Waiting for deployment "webserver-deployment" to complete
+Aug 24 12:05:06.410: INFO: Updating deployment "webserver-deployment" with a non-existent image
+Aug 24 12:05:06.432: INFO: Updating deployment webserver-deployment
+Aug 24 12:05:06.432: INFO: Waiting for observed generation 2
+Aug 24 12:05:08.449: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8
+Aug 24 12:05:08.455: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8
+Aug 24 12:05:08.464: INFO: Waiting for the first rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas
+Aug 24 12:05:08.489: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0
+Aug 24 12:05:08.489: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5
+Aug 24 12:05:08.494: INFO: Waiting for the second rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas
+Aug 24 12:05:08.514: INFO: Verifying that deployment "webserver-deployment" has minimum required number of available replicas
+Aug 24 12:05:08.514: INFO: Scaling up the deployment "webserver-deployment" from 10 to 30
+Aug 24 12:05:08.536: INFO: Updating deployment webserver-deployment
+Aug 24 12:05:08.536: INFO: Waiting for the replicasets of deployment "webserver-deployment" to have desired number of replicas
+Aug 24 12:05:08.551: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20
+Aug 24 12:05:08.558: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13
+[AfterEach] [sig-apps] Deployment
+ test/e2e/apps/deployment.go:84
+Aug 24 12:05:08.575: INFO: Deployment "webserver-deployment":
+&Deployment{ObjectMeta:{webserver-deployment deployment-6551 6a2f7ce3-9382-421a-8b0a-61c7919aed38 11559 3 2023-08-24 12:05:02 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:2] [] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status} {e2e.test Update apps/v1 2023-08-24 12:05:08 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:DeploymentSpec{Replicas:*30,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: 
httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00440e898 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:13,UpdatedReplicas:5,AvailableReplicas:8,UnavailableReplicas:5,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-08-24 12:05:05 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "webserver-deployment-d9f79cb5" is progressing.,LastUpdateTime:2023-08-24 12:05:06 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,},},ReadyReplicas:8,CollisionCount:nil,},} + +Aug 24 12:05:08.587: INFO: New ReplicaSet "webserver-deployment-d9f79cb5" of Deployment "webserver-deployment": +&ReplicaSet{ObjectMeta:{webserver-deployment-d9f79cb5 deployment-6551 cda339a5-cb12-4559-b433-9ea4e42c5328 11562 3 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment webserver-deployment 6a2f7ce3-9382-421a-8b0a-61c7919aed38 0xc0043f6787 0xc0043f6788}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-08-24 12:05:08 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"6a2f7ce3-9382-421a-8b0a-61c7919aed38\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*13,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: d9f79cb5,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [] [] []} {[] [] [{httpd 
webserver:404 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0043f6828 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:5,FullyLabeledReplicas:5,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:05:08.587: INFO: All old ReplicaSets of Deployment "webserver-deployment": +Aug 24 12:05:08.588: INFO: &ReplicaSet{ObjectMeta:{webserver-deployment-7f5969cbc7 deployment-6551 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 11560 3 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment webserver-deployment 6a2f7ce3-9382-421a-8b0a-61c7919aed38 0xc0043f6697 0xc0043f6698}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-08-24 12:05:08 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"6a2f7ce3-9382-421a-8b0a-61c7919aed38\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*20,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7f5969cbc7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0043f6728 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] 
[]}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:8,FullyLabeledReplicas:8,ObservedGeneration:2,ReadyReplicas:8,AvailableReplicas:8,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:05:08.605: INFO: Pod "webserver-deployment-7f5969cbc7-6lh4w" is available: +&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-6lh4w webserver-deployment-7f5969cbc7- deployment-6551 a0a34292-7d66-4c09-b687-01accdb065aa 11471 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f6d27 0xc0043f6d28}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:05 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.247\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-lbm9c,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lbm9c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:10.233.64.247,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://6f799be3f2774bb983b0587ac4e086acea528410c157b09fb3e43f6bad9349dc,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.247,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.609: INFO: Pod "webserver-deployment-7f5969cbc7-92c7g" is available: +&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-92c7g webserver-deployment-7f5969cbc7- deployment-6551 e70b300e-8634-4ae1-b83d-a59abc96c108 11431 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f6f17 0xc0043f6f18}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.97\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-n529b,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n529b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:10.233.65.97,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://0f010483760a5f62718aee5733f7f3dad5cbf4b2ece76b7c0dcb4dd662d60dd4,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.97,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.610: INFO: Pod "webserver-deployment-7f5969cbc7-h7szf" is available: +&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-h7szf webserver-deployment-7f5969cbc7- deployment-6551 9e650a5e-b2b3-47d2-bfd7-f345e31172b9 11461 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7107 0xc0043f7108}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:05 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.36\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-x7jcs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-x7jcs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.36,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://dd8f88223ef13d49ba60c65b078ffc7a3d9f9596231c62c21d645c7042afca60,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.36,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.611: INFO: Pod "webserver-deployment-7f5969cbc7-l2l4z" is available: +&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-l2l4z webserver-deployment-7f5969cbc7- deployment-6551 08a07cad-dc35-4813-aa21-919801abc458 11469 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f72f7 0xc0043f72f8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:05 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.21\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-7xpvl,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7xpvl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:10.233.64.21,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://3db3ae17578b4e51008b894ce901fb43b9fc6bcaca533a3beb85af1faacac539,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.21,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.612: INFO: Pod "webserver-deployment-7f5969cbc7-llg98" is not available: +&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-llg98 webserver-deployment-7f5969cbc7- deployment-6551 60745212-95cb-4f98-99ea-eff9c4338dc6 11563 0 2023-08-24 12:05:08 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f74e7 0xc0043f74e8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:08 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-8wsc5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8wsc5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]Po
dResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.613: INFO: Pod "webserver-deployment-7f5969cbc7-nr2hf" is available: +&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-nr2hf webserver-deployment-7f5969cbc7- deployment-6551 e1dbe24c-d890-4809-8f8f-4dcedf8ee7a8 11434 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7627 0xc0043f7628}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.120\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-h25cb,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h25cb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:10.233.65.120,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://fb3e7a3a7ee14030722237f222ddff1884d051b8488759cdf92c517a05c9503c,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.120,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.614: INFO: Pod "webserver-deployment-7f5969cbc7-qkvvf" is available: +&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-qkvvf webserver-deployment-7f5969cbc7- deployment-6551 feff3bef-8605-491f-9cfc-2e211b70b3f0 11438 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7817 0xc0043f7818}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.252\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-k5snl,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k5snl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:10.233.65.252,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://5ee0d52a75649116fdb307e178b56705f62e96dd7faddd2a62f4473a8db3fb86,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.252,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.622: INFO: Pod "webserver-deployment-7f5969cbc7-qx8n4" is available: +&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-qx8n4 webserver-deployment-7f5969cbc7- deployment-6551 62ed2318-0402-4fa7-b3b1-2d285225f36c 11444 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7a07 0xc0043f7a08}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.124\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ts289,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ts289,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:10.233.64.124,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://3cb4baa38f3fb1e7c44935fe4bec2d8fd35202f39a7aaad54f538a3658a394c7,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.124,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.623: INFO: Pod "webserver-deployment-7f5969cbc7-vc7xn" is available: +&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-vc7xn webserver-deployment-7f5969cbc7- deployment-6551 8eab0c91-1efd-46d3-a77b-ca52e3aaa7b4 11464 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7bf7 0xc0043f7bf8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:05 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.37\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-g4xm5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g4xm5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.37,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://214dd2d3eaf62e6bf3ed389c5ea8f2add3c0e4eedecf2a6d3bb4ed10ce760a94,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.37,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.623: INFO: Pod "webserver-deployment-d9f79cb5-46656" is not available: +&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-46656 webserver-deployment-d9f79cb5- deployment-6551 18ec467f-e668-49cd-9632-092d96446fc6 11524 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0043f7de7 0xc0043f7de8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-8bdh6,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8bdh6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.624: INFO: Pod "webserver-deployment-d9f79cb5-bx9wf" is not available: +&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-bx9wf webserver-deployment-d9f79cb5- deployment-6551 9be5550a-a806-4341-b4d6-91bacefd8062 11523 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0043f7fd7 0xc0043f7fd8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-6vl8w,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vl8w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.626: INFO: Pod "webserver-deployment-d9f79cb5-d5xxg" is not available: +&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-d5xxg webserver-deployment-d9f79cb5- deployment-6551 47fa12d9-2dd9-4ded-9d9e-9eeecc874a3d 11489 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0045001c7 0xc0045001c8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ss2z5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ss2z5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.627: INFO: Pod "webserver-deployment-d9f79cb5-d6p6s" is not available: +&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-d6p6s webserver-deployment-d9f79cb5- deployment-6551 8a9df42a-beb4-41dd-af82-78aaea9a464f 11500 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0045003b7 0xc0045003b8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-xlkcc,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xlkcc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:05:08.627: INFO: Pod "webserver-deployment-d9f79cb5-lc8zr" is not available: +&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-lc8zr webserver-deployment-d9f79cb5- deployment-6551 d9dbc594-5e24-4e64-8bc0-14f65f3cf35b 11505 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0045005a7 0xc0045005a8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ktlp2,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ktlp2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
+[AfterEach] [sig-apps] Deployment
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:00:25.513: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
+Aug 24 12:05:08.628: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-apps] Deployment
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
+[DeferCleanup (Each)] [sig-apps] Deployment
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
+[DeferCleanup (Each)] [sig-apps] Deployment
 tear down framework | framework.go:193
-STEP: Destroying namespace "daemonsets-7125" for this suite. 07/29/23 16:00:25.52
+STEP: Destroying namespace "deployment-6551" for this suite. 08/24/23 12:05:08.647
 ------------------------------
-• [SLOW TEST] [7.408 seconds]
-[sig-apps] Daemon set [Serial]
+• [SLOW TEST] [6.492 seconds]
+[sig-apps] Deployment
 test/e2e/apps/framework.go:23
- should retry creating failed daemon pods [Conformance]
- test/e2e/apps/daemon_set.go:305
+ deployment should support proportional scaling [Conformance]
+ test/e2e/apps/deployment.go:160
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-apps] Daemon set [Serial]
+ [BeforeEach] [sig-apps] Deployment
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:00:18.127
- Jul 29 16:00:18.129: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename daemonsets 07/29/23 16:00:18.131
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:18.164
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:18.169
- [BeforeEach] [sig-apps] Daemon set [Serial]
+ STEP: Creating a kubernetes client 08/24/23 12:05:02.177
+ Aug 24 12:05:02.177: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename deployment 08/24/23 12:05:02.185
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:02.25
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:02.256
+ [BeforeEach] [sig-apps] Deployment
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-apps] Daemon set [Serial]
- test/e2e/apps/daemon_set.go:157
- [It] should retry creating failed daemon pods [Conformance]
- test/e2e/apps/daemon_set.go:305
- STEP: Creating a simple DaemonSet "daemon-set" 07/29/23 16:00:18.241
- STEP: Check that daemon pods launch on every node of the cluster. 07/29/23 16:00:18.254
- Jul 29 16:00:18.274: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
- Jul 29 16:00:18.276: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
- Jul 29 16:00:19.335: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
- Jul 29 16:00:19.335: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
- Jul 29 16:00:20.300: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
- Jul 29 16:00:20.300: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
- STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. 07/29/23 16:00:20.309
- Jul 29 16:00:20.359: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
- Jul 29 16:00:20.359: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
- Jul 29 16:00:21.379: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
- Jul 29 16:00:21.379: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
- Jul 29 16:00:22.379: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
- Jul 29 16:00:22.379: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
- STEP: Wait for the failed daemon pod to be completely deleted. 07/29/23 16:00:22.379
- [AfterEach] [sig-apps] Daemon set [Serial]
- test/e2e/apps/daemon_set.go:122
- STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:00:22.398
- STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-7125, will wait for the garbage collector to delete the pods 07/29/23 16:00:22.398
- Jul 29 16:00:22.468: INFO: Deleting DaemonSet.extensions daemon-set took: 12.738849ms
- Jul 29 16:00:22.569: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.543817ms
- Jul 29 16:00:25.477: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
- Jul 29 16:00:25.477: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set
- Jul 29 16:00:25.482: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"13235"},"items":null}
-
- Jul 29 16:00:25.486: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"13235"},"items":null}
-
- [AfterEach] [sig-apps] Daemon set [Serial]
+ [BeforeEach] [sig-apps] Deployment
+ test/e2e/apps/deployment.go:91
+ [It] deployment should support proportional scaling [Conformance]
+ test/e2e/apps/deployment.go:160
+ Aug 24 12:05:02.262: INFO: Creating deployment "webserver-deployment"
+ Aug 24 12:05:02.271: INFO: Waiting for observed generation 1
+ Aug 24 12:05:04.339: INFO: Waiting for all required pods to come up
+ Aug 24 12:05:04.355: INFO: Pod name httpd: Found 10 pods out of 10
+ STEP: ensuring each pod is running 08/24/23 12:05:04.355
+ Aug 24 12:05:04.355: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-5rsbd" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.356: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-59lhj" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.356: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-6lh4w" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.356: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-vc7xn" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-l2l4z" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-92c7g" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-h7szf" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-nr2hf" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-qkvvf" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.357: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-qx8n4" in namespace "deployment-6551" to be "running"
+ Aug 24 12:05:04.369: INFO: Pod "webserver-deployment-7f5969cbc7-5rsbd": Phase="Pending", Reason="", readiness=false. Elapsed: 13.291498ms
+ Aug 24 12:05:04.369: INFO: Pod "webserver-deployment-7f5969cbc7-59lhj": Phase="Pending", Reason="", readiness=false. Elapsed: 13.055108ms
+ Aug 24 12:05:04.372: INFO: Pod "webserver-deployment-7f5969cbc7-qkvvf": Phase="Pending", Reason="", readiness=false. Elapsed: 13.577046ms
+ Aug 24 12:05:04.372: INFO: Pod "webserver-deployment-7f5969cbc7-h7szf": Phase="Pending", Reason="", readiness=false. Elapsed: 14.724481ms
+ Aug 24 12:05:04.373: INFO: Pod "webserver-deployment-7f5969cbc7-92c7g": Phase="Pending", Reason="", readiness=false. Elapsed: 15.26906ms
+ Aug 24 12:05:04.373: INFO: Pod "webserver-deployment-7f5969cbc7-vc7xn": Phase="Pending", Reason="", readiness=false. Elapsed: 16.011588ms
+ Aug 24 12:05:04.373: INFO: Pod "webserver-deployment-7f5969cbc7-nr2hf": Phase="Pending", Reason="", readiness=false. Elapsed: 14.943674ms
+ Aug 24 12:05:04.373: INFO: Pod "webserver-deployment-7f5969cbc7-6lh4w": Phase="Pending", Reason="", readiness=false. Elapsed: 17.155825ms
+ Aug 24 12:05:04.376: INFO: Pod "webserver-deployment-7f5969cbc7-qx8n4": Phase="Pending", Reason="", readiness=false. Elapsed: 16.379466ms
+ Aug 24 12:05:04.378: INFO: Pod "webserver-deployment-7f5969cbc7-l2l4z": Phase="Pending", Reason="", readiness=false. Elapsed: 20.606816ms
+ Aug 24 12:05:06.378: INFO: Pod "webserver-deployment-7f5969cbc7-5rsbd": Phase="Running", Reason="", readiness=true. Elapsed: 2.02270839s
+ Aug 24 12:05:06.379: INFO: Pod "webserver-deployment-7f5969cbc7-5rsbd" satisfied condition "running"
+ Aug 24 12:05:06.379: INFO: Pod "webserver-deployment-7f5969cbc7-59lhj": Phase="Running", Reason="", readiness=true. Elapsed: 2.022784921s
+ Aug 24 12:05:06.379: INFO: Pod "webserver-deployment-7f5969cbc7-59lhj" satisfied condition "running"
+ Aug 24 12:05:06.384: INFO: Pod "webserver-deployment-7f5969cbc7-nr2hf": Phase="Running", Reason="", readiness=true. Elapsed: 2.025602468s
+ Aug 24 12:05:06.384: INFO: Pod "webserver-deployment-7f5969cbc7-nr2hf" satisfied condition "running"
+ Aug 24 12:05:06.384: INFO: Pod "webserver-deployment-7f5969cbc7-vc7xn": Phase="Running", Reason="", readiness=true. Elapsed: 2.027336512s
+ Aug 24 12:05:06.385: INFO: Pod "webserver-deployment-7f5969cbc7-vc7xn" satisfied condition "running"
+ Aug 24 12:05:06.385: INFO: Pod "webserver-deployment-7f5969cbc7-6lh4w": Phase="Running", Reason="", readiness=true. Elapsed: 2.028936045s
+ Aug 24 12:05:06.385: INFO: Pod "webserver-deployment-7f5969cbc7-6lh4w" satisfied condition "running"
+ Aug 24 12:05:06.388: INFO: Pod "webserver-deployment-7f5969cbc7-92c7g": Phase="Running", Reason="", readiness=true. Elapsed: 2.030065101s
+ Aug 24 12:05:06.388: INFO: Pod "webserver-deployment-7f5969cbc7-92c7g" satisfied condition "running"
+ Aug 24 12:05:06.390: INFO: Pod "webserver-deployment-7f5969cbc7-qkvvf": Phase="Running", Reason="", readiness=true. Elapsed: 2.031347373s
+ Aug 24 12:05:06.390: INFO: Pod "webserver-deployment-7f5969cbc7-qkvvf" satisfied condition "running"
+ Aug 24 12:05:06.390: INFO: Pod "webserver-deployment-7f5969cbc7-qx8n4": Phase="Running", Reason="", readiness=true. Elapsed: 2.031278272s
+ Aug 24 12:05:06.390: INFO: Pod "webserver-deployment-7f5969cbc7-qx8n4" satisfied condition "running"
+ Aug 24 12:05:06.391: INFO: Pod "webserver-deployment-7f5969cbc7-l2l4z": Phase="Running", Reason="", readiness=true. Elapsed: 2.033725408s
+ Aug 24 12:05:06.391: INFO: Pod "webserver-deployment-7f5969cbc7-l2l4z" satisfied condition "running"
+ Aug 24 12:05:06.392: INFO: Pod "webserver-deployment-7f5969cbc7-h7szf": Phase="Running", Reason="", readiness=true. Elapsed: 2.034295724s
+ Aug 24 12:05:06.392: INFO: Pod "webserver-deployment-7f5969cbc7-h7szf" satisfied condition "running"
+ Aug 24 12:05:06.392: INFO: Waiting for deployment "webserver-deployment" to complete
+ Aug 24 12:05:06.410: INFO: Updating deployment "webserver-deployment" with a non-existent image
+ Aug 24 12:05:06.432: INFO: Updating deployment webserver-deployment
+ Aug 24 12:05:06.432: INFO: Waiting for observed generation 2
+ Aug 24 12:05:08.449: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8
+ Aug 24 12:05:08.455: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8
+ Aug 24 12:05:08.464: INFO: Waiting for the first rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas
+ Aug 24 12:05:08.489: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0
+ Aug 24 12:05:08.489: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5
+ Aug 24 12:05:08.494: INFO: Waiting for the second rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas
+ Aug 24 12:05:08.514: INFO: Verifying that deployment "webserver-deployment" has minimum required number of available replicas
+ Aug 24 12:05:08.514: INFO: Scaling up the deployment "webserver-deployment" from 10 to 30
+ Aug 24 12:05:08.536: INFO: Updating deployment webserver-deployment
+ Aug 24 12:05:08.536: INFO: Waiting for the replicasets of deployment "webserver-deployment" to have desired number of replicas
+ Aug 24 12:05:08.551: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20
+ Aug 24 12:05:08.558: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13
+ [AfterEach] [sig-apps] Deployment
+ test/e2e/apps/deployment.go:84
+ Aug 24 12:05:08.575: INFO: Deployment "webserver-deployment":
+ &Deployment{ObjectMeta:{webserver-deployment deployment-6551 6a2f7ce3-9382-421a-8b0a-61c7919aed38 11559 3 2023-08-24 12:05:02 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:2] [] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status} {e2e.test Update apps/v1 2023-08-24 12:05:08 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:DeploymentSpec{Replicas:*30,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: 
httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00440e898 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:13,UpdatedReplicas:5,AvailableReplicas:8,UnavailableReplicas:5,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-08-24 12:05:05 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "webserver-deployment-d9f79cb5" is progressing.,LastUpdateTime:2023-08-24 12:05:06 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,},},ReadyReplicas:8,CollisionCount:nil,},} + + Aug 24 12:05:08.587: INFO: New ReplicaSet "webserver-deployment-d9f79cb5" of Deployment "webserver-deployment": + &ReplicaSet{ObjectMeta:{webserver-deployment-d9f79cb5 deployment-6551 cda339a5-cb12-4559-b433-9ea4e42c5328 11562 3 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment webserver-deployment 6a2f7ce3-9382-421a-8b0a-61c7919aed38 0xc0043f6787 0xc0043f6788}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-08-24 12:05:08 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"6a2f7ce3-9382-421a-8b0a-61c7919aed38\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*13,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: d9f79cb5,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [] [] []} {[] [] [{httpd 
webserver:404 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0043f6828 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:5,FullyLabeledReplicas:5,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:05:08.587: INFO: All old ReplicaSets of Deployment "webserver-deployment": + Aug 24 12:05:08.588: INFO: &ReplicaSet{ObjectMeta:{webserver-deployment-7f5969cbc7 deployment-6551 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 11560 3 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment webserver-deployment 6a2f7ce3-9382-421a-8b0a-61c7919aed38 0xc0043f6697 0xc0043f6698}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-08-24 12:05:08 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"6a2f7ce3-9382-421a-8b0a-61c7919aed38\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*20,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7f5969cbc7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0043f6728 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] 
[]}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:8,FullyLabeledReplicas:8,ObservedGeneration:2,ReadyReplicas:8,AvailableReplicas:8,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:05:08.605: INFO: Pod "webserver-deployment-7f5969cbc7-6lh4w" is available: + &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-6lh4w webserver-deployment-7f5969cbc7- deployment-6551 a0a34292-7d66-4c09-b687-01accdb065aa 11471 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f6d27 0xc0043f6d28}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:05 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.247\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-lbm9c,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lbm9c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:10.233.64.247,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://6f799be3f2774bb983b0587ac4e086acea528410c157b09fb3e43f6bad9349dc,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.247,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.609: INFO: Pod "webserver-deployment-7f5969cbc7-92c7g" is available: + &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-92c7g webserver-deployment-7f5969cbc7- deployment-6551 e70b300e-8634-4ae1-b83d-a59abc96c108 11431 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f6f17 0xc0043f6f18}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.97\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-n529b,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n529b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:10.233.65.97,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://0f010483760a5f62718aee5733f7f3dad5cbf4b2ece76b7c0dcb4dd662d60dd4,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.97,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.610: INFO: Pod "webserver-deployment-7f5969cbc7-h7szf" is available: + &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-h7szf webserver-deployment-7f5969cbc7- deployment-6551 9e650a5e-b2b3-47d2-bfd7-f345e31172b9 11461 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7107 0xc0043f7108}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:05 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.36\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-x7jcs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-x7jcs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.36,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://dd8f88223ef13d49ba60c65b078ffc7a3d9f9596231c62c21d645c7042afca60,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.36,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.611: INFO: Pod "webserver-deployment-7f5969cbc7-l2l4z" is available: + &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-l2l4z webserver-deployment-7f5969cbc7- deployment-6551 08a07cad-dc35-4813-aa21-919801abc458 11469 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f72f7 0xc0043f72f8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:05 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.21\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-7xpvl,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7xpvl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:10.233.64.21,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://3db3ae17578b4e51008b894ce901fb43b9fc6bcaca533a3beb85af1faacac539,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.21,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.612: INFO: Pod "webserver-deployment-7f5969cbc7-llg98" is not available: + &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-llg98 webserver-deployment-7f5969cbc7- deployment-6551 60745212-95cb-4f98-99ea-eff9c4338dc6 11563 0 2023-08-24 12:05:08 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f74e7 0xc0043f74e8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:08 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-8wsc5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8wsc5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]Po
dResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.613: INFO: Pod "webserver-deployment-7f5969cbc7-nr2hf" is available: + &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-nr2hf webserver-deployment-7f5969cbc7- deployment-6551 e1dbe24c-d890-4809-8f8f-4dcedf8ee7a8 11434 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7627 0xc0043f7628}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.120\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-h25cb,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h25cb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:10.233.65.120,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://fb3e7a3a7ee14030722237f222ddff1884d051b8488759cdf92c517a05c9503c,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.120,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.614: INFO: Pod "webserver-deployment-7f5969cbc7-qkvvf" is available: + &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-qkvvf webserver-deployment-7f5969cbc7- deployment-6551 feff3bef-8605-491f-9cfc-2e211b70b3f0 11438 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7817 0xc0043f7818}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.252\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-k5snl,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k5snl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:10.233.65.252,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://5ee0d52a75649116fdb307e178b56705f62e96dd7faddd2a62f4473a8db3fb86,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.252,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.622: INFO: Pod "webserver-deployment-7f5969cbc7-qx8n4" is available: + &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-qx8n4 webserver-deployment-7f5969cbc7- deployment-6551 62ed2318-0402-4fa7-b3b1-2d285225f36c 11444 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7a07 0xc0043f7a08}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.124\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ts289,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ts289,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:04 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:10.233.64.124,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://3cb4baa38f3fb1e7c44935fe4bec2d8fd35202f39a7aaad54f538a3658a394c7,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.124,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.623: INFO: Pod "webserver-deployment-7f5969cbc7-vc7xn" is available: + &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-vc7xn webserver-deployment-7f5969cbc7- deployment-6551 8eab0c91-1efd-46d3-a77b-ca52e3aaa7b4 11464 0 2023-08-24 12:05:02 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74 0xc0043f7bf7 0xc0043f7bf8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"5b4e2f01-5673-41c4-a6ab-50b5a1cb1f74\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:05 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.37\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-g4xm5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g4xm5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.37,StartTime:2023-08-24 12:05:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:04 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://214dd2d3eaf62e6bf3ed389c5ea8f2add3c0e4eedecf2a6d3bb4ed10ce760a94,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.37,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.623: INFO: Pod "webserver-deployment-d9f79cb5-46656" is not available: + &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-46656 webserver-deployment-d9f79cb5- deployment-6551 18ec467f-e668-49cd-9632-092d96446fc6 11524 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0043f7de7 0xc0043f7de8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-8bdh6,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8bdh6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.624: INFO: Pod "webserver-deployment-d9f79cb5-bx9wf" is not available: + &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-bx9wf webserver-deployment-d9f79cb5- deployment-6551 9be5550a-a806-4341-b4d6-91bacefd8062 11523 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0043f7fd7 0xc0043f7fd8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-6vl8w,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vl8w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.626: INFO: Pod "webserver-deployment-d9f79cb5-d5xxg" is not available: + &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-d5xxg webserver-deployment-d9f79cb5- deployment-6551 47fa12d9-2dd9-4ded-9d9e-9eeecc874a3d 11489 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0045001c7 0xc0045001c8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ss2z5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ss2z5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.627: INFO: Pod "webserver-deployment-d9f79cb5-d6p6s" is not available: + &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-d6p6s webserver-deployment-d9f79cb5- deployment-6551 8a9df42a-beb4-41dd-af82-78aaea9a464f 11500 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0045003b7 0xc0045003b8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-xlkcc,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xlkcc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:05:08.627: INFO: Pod "webserver-deployment-d9f79cb5-lc8zr" is not available: + &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-lc8zr webserver-deployment-d9f79cb5- deployment-6551 d9dbc594-5e24-4e64-8bc0-14f65f3cf35b 11505 0 2023-08-24 12:05:06 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 cda339a5-cb12-4559-b433-9ea4e42c5328 0xc0045005a7 0xc0045005a8}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cda339a5-cb12-4559-b433-9ea4e42c5328\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ktlp2,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ktlp2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:,StartTime:2023-08-24 12:05:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + [AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 - Jul 29 16:00:25.513: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + Aug 24 12:05:08.628: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 - STEP: Destroying namespace "daemonsets-7125" for this suite. 07/29/23 16:00:25.52 + STEP: Destroying namespace "deployment-6551" for this suite. 
08/24/23 12:05:08.647 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Servers with support for Table transformation - should return a 406 for a backend which does not implement metadata [Conformance] - test/e2e/apimachinery/table_conversion.go:154 -[BeforeEach] [sig-api-machinery] Servers with support for Table transformation +[sig-node] Kubelet when scheduling a busybox command that always fails in a pod + should be possible to delete [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:135 +[BeforeEach] [sig-node] Kubelet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:00:25.54 -Jul 29 16:00:25.540: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename tables 07/29/23 16:00:25.542 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:25.567 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:25.572 -[BeforeEach] [sig-api-machinery] Servers with support for Table transformation +STEP: Creating a kubernetes client 08/24/23 12:05:08.675 +Aug 24 12:05:08.675: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubelet-test 08/24/23 12:05:08.68 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:08.798 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:08.805 +[BeforeEach] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] Servers with support for Table transformation - test/e2e/apimachinery/table_conversion.go:49 -[It] should return a 406 for a backend which does not implement metadata [Conformance] - test/e2e/apimachinery/table_conversion.go:154 -[AfterEach] [sig-api-machinery] Servers with support for Table transformation +[BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 +[BeforeEach] when scheduling a busybox command that always fails in a pod + test/e2e/common/node/kubelet.go:85 +[It] should be possible to delete [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:135 +[AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 -Jul 29 16:00:25.580: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation +Aug 24 12:05:08.860: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation +[DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation +[DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 -STEP: Destroying namespace "tables-7229" for this suite. 07/29/23 16:00:25.59 +STEP: Destroying namespace "kubelet-test-5320" for this suite. 
08/24/23 12:05:08.87 ------------------------------ -• [0.059 seconds] -[sig-api-machinery] Servers with support for Table transformation -test/e2e/apimachinery/framework.go:23 - should return a 406 for a backend which does not implement metadata [Conformance] - test/e2e/apimachinery/table_conversion.go:154 +• [0.213 seconds] +[sig-node] Kubelet +test/e2e/common/node/framework.go:23 + when scheduling a busybox command that always fails in a pod + test/e2e/common/node/kubelet.go:82 + should be possible to delete [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:135 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Servers with support for Table transformation + [BeforeEach] [sig-node] Kubelet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:00:25.54 - Jul 29 16:00:25.540: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename tables 07/29/23 16:00:25.542 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:25.567 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:25.572 - [BeforeEach] [sig-api-machinery] Servers with support for Table transformation + STEP: Creating a kubernetes client 08/24/23 12:05:08.675 + Aug 24 12:05:08.675: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubelet-test 08/24/23 12:05:08.68 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:08.798 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:08.805 + [BeforeEach] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] Servers with support for Table transformation - test/e2e/apimachinery/table_conversion.go:49 - [It] should return a 406 for a backend which does not implement metadata [Conformance] - test/e2e/apimachinery/table_conversion.go:154 - [AfterEach] [sig-api-machinery] Servers with support for Table transformation + [BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 + [BeforeEach] when scheduling a busybox command that always fails in a pod + test/e2e/common/node/kubelet.go:85 + [It] should be possible to delete [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:135 + [AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 - Jul 29 16:00:25.580: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation + Aug 24 12:05:08.860: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation + [DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation + [DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 - STEP: Destroying namespace "tables-7229" for this suite. 07/29/23 16:00:25.59 + STEP: Destroying namespace "kubelet-test-5320" for this suite. 
08/24/23 12:05:08.87 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSS ------------------------------ -[sig-node] Sysctls [LinuxOnly] [NodeConformance] - should support sysctls [MinimumKubeletVersion:1.21] [Conformance] - test/e2e/common/node/sysctl.go:77 -[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] - test/e2e/common/node/sysctl.go:37 -[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +[sig-architecture] Conformance Tests + should have at least two untainted nodes [Conformance] + test/e2e/architecture/conformance.go:38 +[BeforeEach] [sig-architecture] Conformance Tests set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:00:25.602 -Jul 29 16:00:25.602: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sysctl 07/29/23 16:00:25.604 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:25.627 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:25.632 -[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +STEP: Creating a kubernetes client 08/24/23 12:05:08.89 +Aug 24 12:05:08.890: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename conformance-tests 08/24/23 12:05:08.893 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:08.987 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:08.992 +[BeforeEach] [sig-architecture] Conformance Tests test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] - test/e2e/common/node/sysctl.go:67 -[It] should support sysctls [MinimumKubeletVersion:1.21] [Conformance] - test/e2e/common/node/sysctl.go:77 -STEP: Creating a pod with the kernel.shm_rmid_forced sysctl 07/29/23 16:00:25.638 -STEP: Watching for error events or started pod 07/29/23 16:00:25.653 -STEP: Waiting for pod completion 07/29/23 16:00:27.685 -Jul 29 16:00:27.685: INFO: Waiting up to 3m0s for pod "sysctl-eb67a6ee-cd87-497a-a518-6020a18f77fc" in namespace "sysctl-6964" to be "completed" -Jul 29 16:00:27.693: INFO: Pod "sysctl-eb67a6ee-cd87-497a-a518-6020a18f77fc": Phase="Pending", Reason="", readiness=false. Elapsed: 7.583079ms -Jul 29 16:00:29.705: INFO: Pod "sysctl-eb67a6ee-cd87-497a-a518-6020a18f77fc": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.019484558s -Jul 29 16:00:29.705: INFO: Pod "sysctl-eb67a6ee-cd87-497a-a518-6020a18f77fc" satisfied condition "completed" -STEP: Checking that the pod succeeded 07/29/23 16:00:29.713 -STEP: Getting logs from the pod 07/29/23 16:00:29.713 -STEP: Checking that the sysctl is actually updated 07/29/23 16:00:29.729 -[AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +[It] should have at least two untainted nodes [Conformance] + test/e2e/architecture/conformance.go:38 +STEP: Getting node addresses 08/24/23 12:05:08.999 +Aug 24 12:05:09.000: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +[AfterEach] [sig-architecture] Conformance Tests test/e2e/framework/node/init/init.go:32 -Jul 29 16:00:29.729: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +Aug 24 12:05:09.012: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-architecture] Conformance Tests test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +[DeferCleanup (Each)] [sig-architecture] Conformance Tests dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +[DeferCleanup (Each)] [sig-architecture] Conformance Tests tear down framework | framework.go:193 -STEP: Destroying namespace "sysctl-6964" for this suite. 07/29/23 16:00:29.741 +STEP: Destroying namespace "conformance-tests-691" for this suite. 08/24/23 12:05:09.024 ------------------------------ -• [4.154 seconds] -[sig-node] Sysctls [LinuxOnly] [NodeConformance] -test/e2e/common/node/framework.go:23 - should support sysctls [MinimumKubeletVersion:1.21] [Conformance] - test/e2e/common/node/sysctl.go:77 +• [0.148 seconds] +[sig-architecture] Conformance Tests +test/e2e/architecture/framework.go:23 + should have at least two untainted nodes [Conformance] + test/e2e/architecture/conformance.go:38 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] - test/e2e/common/node/sysctl.go:37 - [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + [BeforeEach] [sig-architecture] Conformance Tests set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:00:25.602 - Jul 29 16:00:25.602: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sysctl 07/29/23 16:00:25.604 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:25.627 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:25.632 - [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + STEP: Creating a kubernetes client 08/24/23 12:05:08.89 + Aug 24 12:05:08.890: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename conformance-tests 08/24/23 12:05:08.893 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:08.987 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:08.992 + [BeforeEach] [sig-architecture] Conformance Tests test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] - test/e2e/common/node/sysctl.go:67 - [It] should support sysctls [MinimumKubeletVersion:1.21] [Conformance] - test/e2e/common/node/sysctl.go:77 - STEP: Creating a pod with the kernel.shm_rmid_forced sysctl 07/29/23 16:00:25.638 - 
STEP: Watching for error events or started pod 07/29/23 16:00:25.653 - STEP: Waiting for pod completion 07/29/23 16:00:27.685 - Jul 29 16:00:27.685: INFO: Waiting up to 3m0s for pod "sysctl-eb67a6ee-cd87-497a-a518-6020a18f77fc" in namespace "sysctl-6964" to be "completed" - Jul 29 16:00:27.693: INFO: Pod "sysctl-eb67a6ee-cd87-497a-a518-6020a18f77fc": Phase="Pending", Reason="", readiness=false. Elapsed: 7.583079ms - Jul 29 16:00:29.705: INFO: Pod "sysctl-eb67a6ee-cd87-497a-a518-6020a18f77fc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.019484558s - Jul 29 16:00:29.705: INFO: Pod "sysctl-eb67a6ee-cd87-497a-a518-6020a18f77fc" satisfied condition "completed" - STEP: Checking that the pod succeeded 07/29/23 16:00:29.713 - STEP: Getting logs from the pod 07/29/23 16:00:29.713 - STEP: Checking that the sysctl is actually updated 07/29/23 16:00:29.729 - [AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + [It] should have at least two untainted nodes [Conformance] + test/e2e/architecture/conformance.go:38 + STEP: Getting node addresses 08/24/23 12:05:08.999 + Aug 24 12:05:09.000: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable + [AfterEach] [sig-architecture] Conformance Tests test/e2e/framework/node/init/init.go:32 - Jul 29 16:00:29.729: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + Aug 24 12:05:09.012: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-architecture] Conformance Tests test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + [DeferCleanup (Each)] [sig-architecture] Conformance Tests dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + [DeferCleanup (Each)] [sig-architecture] Conformance Tests tear down framework | framework.go:193 - STEP: Destroying namespace "sysctl-6964" for this suite. 07/29/23 16:00:29.741 + STEP: Destroying namespace "conformance-tests-691" for this suite. 
08/24/23 12:05:09.024 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should have session affinity work for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2228 -[BeforeEach] [sig-network] Services +[sig-node] PreStop + should call prestop when killing a pod [Conformance] + test/e2e/node/pre_stop.go:168 +[BeforeEach] [sig-node] PreStop set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:00:29.759 -Jul 29 16:00:29.759: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 16:00:29.764 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:29.794 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:29.799 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 12:05:09.042 +Aug 24 12:05:09.042: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename prestop 08/24/23 12:05:09.045 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:09.079 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:09.087 +[BeforeEach] [sig-node] PreStop test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should have session affinity work for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2228 -STEP: creating service in namespace services-5445 07/29/23 16:00:29.804 -STEP: creating service affinity-nodeport in namespace services-5445 07/29/23 16:00:29.804 -STEP: creating replication controller affinity-nodeport in namespace services-5445 07/29/23 16:00:29.827 -I0729 16:00:29.841583 13 runners.go:193] Created replication controller with name: affinity-nodeport, namespace: services-5445, replica count: 3 -I0729 16:00:32.894366 13 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Jul 29 16:00:32.914: INFO: Creating new exec pod -Jul 29 16:00:32.927: INFO: Waiting up to 5m0s for pod "execpod-affinity4cw5g" in namespace "services-5445" to be "running" -Jul 29 16:00:32.935: INFO: Pod "execpod-affinity4cw5g": Phase="Pending", Reason="", readiness=false. Elapsed: 8.185569ms -Jul 29 16:00:34.943: INFO: Pod "execpod-affinity4cw5g": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.016032561s -Jul 29 16:00:34.943: INFO: Pod "execpod-affinity4cw5g" satisfied condition "running" -Jul 29 16:00:35.959: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c nc -v -z -w 2 affinity-nodeport 80' -Jul 29 16:00:36.294: INFO: stderr: "+ nc -v -z -w 2 affinity-nodeport 80\nConnection to affinity-nodeport 80 port [tcp/http] succeeded!\n" -Jul 29 16:00:36.294: INFO: stdout: "" -Jul 29 16:00:36.294: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c nc -v -z -w 2 10.233.7.172 80' -Jul 29 16:00:36.538: INFO: stderr: "+ nc -v -z -w 2 10.233.7.172 80\nConnection to 10.233.7.172 80 port [tcp/http] succeeded!\n" -Jul 29 16:00:36.538: INFO: stdout: "" -Jul 29 16:00:36.539: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c nc -v -z -w 2 192.168.121.211 31560' -Jul 29 16:00:36.789: INFO: stderr: "+ nc -v -z -w 2 192.168.121.211 31560\nConnection to 192.168.121.211 31560 port [tcp/*] succeeded!\n" -Jul 29 16:00:36.789: INFO: stdout: "" -Jul 29 16:00:36.789: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c nc -v -z -w 2 192.168.121.120 31560' -Jul 29 16:00:37.002: INFO: stderr: "+ nc -v -z -w 2 192.168.121.120 31560\nConnection to 192.168.121.120 31560 port [tcp/*] succeeded!\n" -Jul 29 16:00:37.002: INFO: stdout: "" -Jul 29 16:00:37.003: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.120:31560/ ; done' -Jul 29 16:00:37.445: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n" -Jul 29 16:00:37.445: INFO: stdout: "\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp" -Jul 29 
16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp -Jul 29 16:00:37.445: INFO: Cleaning up the exec pod -STEP: deleting ReplicationController affinity-nodeport in namespace services-5445, will wait for the garbage collector to delete the pods 07/29/23 16:00:37.472 -Jul 29 16:00:37.546: INFO: Deleting ReplicationController affinity-nodeport took: 16.06919ms -Jul 29 16:00:37.648: INFO: Terminating ReplicationController affinity-nodeport pods took: 101.221841ms -[AfterEach] [sig-network] Services +[BeforeEach] [sig-node] PreStop + test/e2e/node/pre_stop.go:159 +[It] should call prestop when killing a pod [Conformance] + test/e2e/node/pre_stop.go:168 +STEP: Creating server pod server in namespace prestop-5044 08/24/23 12:05:09.09 +STEP: Waiting for pods to come up. 08/24/23 12:05:09.106 +Aug 24 12:05:09.106: INFO: Waiting up to 5m0s for pod "server" in namespace "prestop-5044" to be "running" +Aug 24 12:05:09.115: INFO: Pod "server": Phase="Pending", Reason="", readiness=false. Elapsed: 8.132261ms +Aug 24 12:05:11.127: INFO: Pod "server": Phase="Running", Reason="", readiness=true. Elapsed: 2.020197005s +Aug 24 12:05:11.127: INFO: Pod "server" satisfied condition "running" +STEP: Creating tester pod tester in namespace prestop-5044 08/24/23 12:05:11.133 +Aug 24 12:05:11.144: INFO: Waiting up to 5m0s for pod "tester" in namespace "prestop-5044" to be "running" +Aug 24 12:05:11.156: INFO: Pod "tester": Phase="Pending", Reason="", readiness=false. Elapsed: 11.308871ms +Aug 24 12:05:13.163: INFO: Pod "tester": Phase="Running", Reason="", readiness=true. Elapsed: 2.018204767s +Aug 24 12:05:13.163: INFO: Pod "tester" satisfied condition "running" +STEP: Deleting pre-stop pod 08/24/23 12:05:13.163 +Aug 24 12:05:18.191: INFO: Saw: { + "Hostname": "server", + "Sent": null, + "Received": { + "prestop": 1 + }, + "Errors": null, + "Log": [ + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up." 
+ ], + "StillContactingPeers": true +} +STEP: Deleting the server pod 08/24/23 12:05:18.192 +[AfterEach] [sig-node] PreStop test/e2e/framework/node/init/init.go:32 -Jul 29 16:00:39.489: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 12:05:18.217: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] PreStop test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-node] PreStop dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-node] PreStop tear down framework | framework.go:193 -STEP: Destroying namespace "services-5445" for this suite. 07/29/23 16:00:39.498 +STEP: Destroying namespace "prestop-5044" for this suite. 08/24/23 12:05:18.251 ------------------------------ -• [SLOW TEST] [9.750 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should have session affinity work for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2228 +• [SLOW TEST] [9.223 seconds] +[sig-node] PreStop +test/e2e/node/framework.go:23 + should call prestop when killing a pod [Conformance] + test/e2e/node/pre_stop.go:168 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-node] PreStop set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:00:29.759 - Jul 29 16:00:29.759: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 16:00:29.764 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:29.794 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:29.799 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 12:05:09.042 + Aug 24 12:05:09.042: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename prestop 08/24/23 12:05:09.045 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:09.079 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:09.087 + [BeforeEach] [sig-node] PreStop test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should have session affinity work for NodePort service [LinuxOnly] [Conformance] - test/e2e/network/service.go:2228 - STEP: creating service in namespace services-5445 07/29/23 16:00:29.804 - STEP: creating service affinity-nodeport in namespace services-5445 07/29/23 16:00:29.804 - STEP: creating replication controller affinity-nodeport in namespace services-5445 07/29/23 16:00:29.827 - I0729 16:00:29.841583 13 runners.go:193] Created replication controller with name: affinity-nodeport, namespace: services-5445, replica count: 3 - I0729 16:00:32.894366 13 runners.go:193] affinity-nodeport Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady - Jul 29 16:00:32.914: INFO: Creating new exec pod - Jul 29 16:00:32.927: INFO: Waiting up to 5m0s for pod "execpod-affinity4cw5g" in namespace "services-5445" to be "running" - Jul 29 16:00:32.935: INFO: Pod "execpod-affinity4cw5g": Phase="Pending", Reason="", readiness=false. Elapsed: 8.185569ms - Jul 29 16:00:34.943: INFO: Pod "execpod-affinity4cw5g": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.016032561s - Jul 29 16:00:34.943: INFO: Pod "execpod-affinity4cw5g" satisfied condition "running" - Jul 29 16:00:35.959: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c nc -v -z -w 2 affinity-nodeport 80' - Jul 29 16:00:36.294: INFO: stderr: "+ nc -v -z -w 2 affinity-nodeport 80\nConnection to affinity-nodeport 80 port [tcp/http] succeeded!\n" - Jul 29 16:00:36.294: INFO: stdout: "" - Jul 29 16:00:36.294: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c nc -v -z -w 2 10.233.7.172 80' - Jul 29 16:00:36.538: INFO: stderr: "+ nc -v -z -w 2 10.233.7.172 80\nConnection to 10.233.7.172 80 port [tcp/http] succeeded!\n" - Jul 29 16:00:36.538: INFO: stdout: "" - Jul 29 16:00:36.539: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c nc -v -z -w 2 192.168.121.211 31560' - Jul 29 16:00:36.789: INFO: stderr: "+ nc -v -z -w 2 192.168.121.211 31560\nConnection to 192.168.121.211 31560 port [tcp/*] succeeded!\n" - Jul 29 16:00:36.789: INFO: stdout: "" - Jul 29 16:00:36.789: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c nc -v -z -w 2 192.168.121.120 31560' - Jul 29 16:00:37.002: INFO: stderr: "+ nc -v -z -w 2 192.168.121.120 31560\nConnection to 192.168.121.120 31560 port [tcp/*] succeeded!\n" - Jul 29 16:00:37.002: INFO: stdout: "" - Jul 29 16:00:37.003: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-5445 exec execpod-affinity4cw5g -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.120:31560/ ; done' - Jul 29 16:00:37.445: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.120:31560/\n" - Jul 29 16:00:37.445: INFO: stdout: 
"\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp\naffinity-nodeport-s89sp" - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Received response from host: affinity-nodeport-s89sp - Jul 29 16:00:37.445: INFO: Cleaning up the exec pod - STEP: deleting ReplicationController affinity-nodeport in namespace services-5445, will wait for the garbage collector to delete the pods 07/29/23 16:00:37.472 - Jul 29 16:00:37.546: INFO: Deleting ReplicationController affinity-nodeport took: 16.06919ms - Jul 29 16:00:37.648: INFO: Terminating ReplicationController affinity-nodeport pods took: 101.221841ms - [AfterEach] [sig-network] Services + [BeforeEach] [sig-node] PreStop + test/e2e/node/pre_stop.go:159 + [It] should call prestop when killing a pod [Conformance] + test/e2e/node/pre_stop.go:168 + STEP: Creating server pod server in namespace prestop-5044 08/24/23 12:05:09.09 + STEP: Waiting for pods to come up. 08/24/23 12:05:09.106 + Aug 24 12:05:09.106: INFO: Waiting up to 5m0s for pod "server" in namespace "prestop-5044" to be "running" + Aug 24 12:05:09.115: INFO: Pod "server": Phase="Pending", Reason="", readiness=false. Elapsed: 8.132261ms + Aug 24 12:05:11.127: INFO: Pod "server": Phase="Running", Reason="", readiness=true. Elapsed: 2.020197005s + Aug 24 12:05:11.127: INFO: Pod "server" satisfied condition "running" + STEP: Creating tester pod tester in namespace prestop-5044 08/24/23 12:05:11.133 + Aug 24 12:05:11.144: INFO: Waiting up to 5m0s for pod "tester" in namespace "prestop-5044" to be "running" + Aug 24 12:05:11.156: INFO: Pod "tester": Phase="Pending", Reason="", readiness=false. Elapsed: 11.308871ms + Aug 24 12:05:13.163: INFO: Pod "tester": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.018204767s + Aug 24 12:05:13.163: INFO: Pod "tester" satisfied condition "running" + STEP: Deleting pre-stop pod 08/24/23 12:05:13.163 + Aug 24 12:05:18.191: INFO: Saw: { + "Hostname": "server", + "Sent": null, + "Received": { + "prestop": 1 + }, + "Errors": null, + "Log": [ + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up." + ], + "StillContactingPeers": true + } + STEP: Deleting the server pod 08/24/23 12:05:18.192 + [AfterEach] [sig-node] PreStop test/e2e/framework/node/init/init.go:32 - Jul 29 16:00:39.489: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 12:05:18.217: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] PreStop test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-node] PreStop dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-node] PreStop tear down framework | framework.go:193 - STEP: Destroying namespace "services-5445" for this suite. 07/29/23 16:00:39.498 + STEP: Destroying namespace "prestop-5044" for this suite. 08/24/23 12:05:18.251 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] - Should recreate evicted statefulset [Conformance] - test/e2e/apps/statefulset.go:739 -[BeforeEach] [sig-apps] StatefulSet +[sig-node] ConfigMap + should fail to create ConfigMap with empty key [Conformance] + test/e2e/common/node/configmap.go:138 +[BeforeEach] [sig-node] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:00:39.517 -Jul 29 16:00:39.517: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename statefulset 07/29/23 16:00:39.519 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:39.551 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:39.556 -[BeforeEach] [sig-apps] StatefulSet +STEP: Creating a kubernetes client 08/24/23 12:05:18.269 +Aug 24 12:05:18.269: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 12:05:18.272 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:18.304 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:18.31 +[BeforeEach] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 -[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 -STEP: Creating service test in namespace statefulset-2561 07/29/23 16:00:39.562 -[It] Should recreate evicted statefulset [Conformance] - test/e2e/apps/statefulset.go:739 -STEP: Looking for a node to schedule stateful set and pod 07/29/23 16:00:39.577 -STEP: Creating pod with conflicting port in namespace statefulset-2561 07/29/23 16:00:39.588 -STEP: Waiting until pod test-pod will start running in namespace statefulset-2561 07/29/23 16:00:39.604 -Jul 29 16:00:39.604: INFO: Waiting up to 5m0s for pod "test-pod" in namespace 
"statefulset-2561" to be "running" -Jul 29 16:00:39.622: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 17.998584ms -Jul 29 16:00:41.629: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.024841996s -Jul 29 16:00:41.629: INFO: Pod "test-pod" satisfied condition "running" -STEP: Creating statefulset with conflicting port in namespace statefulset-2561 07/29/23 16:00:41.629 -STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace statefulset-2561 07/29/23 16:00:41.638 -Jul 29 16:00:41.666: INFO: Observed stateful pod in namespace: statefulset-2561, name: ss-0, uid: 15eaeb93-1b4d-4b1a-91b1-7c867b1986cb, status phase: Pending. Waiting for statefulset controller to delete. -Jul 29 16:00:41.696: INFO: Observed stateful pod in namespace: statefulset-2561, name: ss-0, uid: 15eaeb93-1b4d-4b1a-91b1-7c867b1986cb, status phase: Failed. Waiting for statefulset controller to delete. -Jul 29 16:00:41.744: INFO: Observed stateful pod in namespace: statefulset-2561, name: ss-0, uid: 15eaeb93-1b4d-4b1a-91b1-7c867b1986cb, status phase: Failed. Waiting for statefulset controller to delete. -Jul 29 16:00:41.749: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-2561 -STEP: Removing pod with conflicting port in namespace statefulset-2561 07/29/23 16:00:41.749 -STEP: Waiting when stateful pod ss-0 will be recreated in namespace statefulset-2561 and will be in running state 07/29/23 16:00:41.769 -[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 -Jul 29 16:00:55.835: INFO: Deleting all statefulset in ns statefulset-2561 -Jul 29 16:00:55.842: INFO: Scaling statefulset ss to 0 -Jul 29 16:01:05.881: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 16:01:05.888: INFO: Deleting statefulset ss -[AfterEach] [sig-apps] StatefulSet +[It] should fail to create ConfigMap with empty key [Conformance] + test/e2e/common/node/configmap.go:138 +STEP: Creating configMap that has name configmap-test-emptyKey-b329a5cd-f94b-45c7-944a-e11a4d385cea 08/24/23 12:05:18.314 +[AfterEach] [sig-node] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:01:05.910: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] StatefulSet +Aug 24 12:05:18.318: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-node] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-node] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "statefulset-2561" for this suite. 07/29/23 16:01:05.918 +STEP: Destroying namespace "configmap-5753" for this suite. 
08/24/23 12:05:18.327 ------------------------------ -• [SLOW TEST] [26.412 seconds] -[sig-apps] StatefulSet -test/e2e/apps/framework.go:23 - Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:103 - Should recreate evicted statefulset [Conformance] - test/e2e/apps/statefulset.go:739 +• [0.070 seconds] +[sig-node] ConfigMap +test/e2e/common/node/framework.go:23 + should fail to create ConfigMap with empty key [Conformance] + test/e2e/common/node/configmap.go:138 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] StatefulSet + [BeforeEach] [sig-node] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:00:39.517 - Jul 29 16:00:39.517: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename statefulset 07/29/23 16:00:39.519 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:00:39.551 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:00:39.556 - [BeforeEach] [sig-apps] StatefulSet + STEP: Creating a kubernetes client 08/24/23 12:05:18.269 + Aug 24 12:05:18.269: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 12:05:18.272 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:18.304 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:18.31 + [BeforeEach] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 - [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 - STEP: Creating service test in namespace statefulset-2561 07/29/23 16:00:39.562 - [It] Should recreate evicted statefulset [Conformance] - test/e2e/apps/statefulset.go:739 - STEP: Looking for a node to schedule stateful set and pod 07/29/23 16:00:39.577 - STEP: Creating pod with conflicting port in namespace statefulset-2561 07/29/23 16:00:39.588 - STEP: Waiting until pod test-pod will start running in namespace statefulset-2561 07/29/23 16:00:39.604 - Jul 29 16:00:39.604: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "statefulset-2561" to be "running" - Jul 29 16:00:39.622: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 17.998584ms - Jul 29 16:00:41.629: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.024841996s - Jul 29 16:00:41.629: INFO: Pod "test-pod" satisfied condition "running" - STEP: Creating statefulset with conflicting port in namespace statefulset-2561 07/29/23 16:00:41.629 - STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace statefulset-2561 07/29/23 16:00:41.638 - Jul 29 16:00:41.666: INFO: Observed stateful pod in namespace: statefulset-2561, name: ss-0, uid: 15eaeb93-1b4d-4b1a-91b1-7c867b1986cb, status phase: Pending. Waiting for statefulset controller to delete. - Jul 29 16:00:41.696: INFO: Observed stateful pod in namespace: statefulset-2561, name: ss-0, uid: 15eaeb93-1b4d-4b1a-91b1-7c867b1986cb, status phase: Failed. Waiting for statefulset controller to delete. - Jul 29 16:00:41.744: INFO: Observed stateful pod in namespace: statefulset-2561, name: ss-0, uid: 15eaeb93-1b4d-4b1a-91b1-7c867b1986cb, status phase: Failed. Waiting for statefulset controller to delete. 
- Jul 29 16:00:41.749: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-2561 - STEP: Removing pod with conflicting port in namespace statefulset-2561 07/29/23 16:00:41.749 - STEP: Waiting when stateful pod ss-0 will be recreated in namespace statefulset-2561 and will be in running state 07/29/23 16:00:41.769 - [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 - Jul 29 16:00:55.835: INFO: Deleting all statefulset in ns statefulset-2561 - Jul 29 16:00:55.842: INFO: Scaling statefulset ss to 0 - Jul 29 16:01:05.881: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 16:01:05.888: INFO: Deleting statefulset ss - [AfterEach] [sig-apps] StatefulSet + [It] should fail to create ConfigMap with empty key [Conformance] + test/e2e/common/node/configmap.go:138 + STEP: Creating configMap that has name configmap-test-emptyKey-b329a5cd-f94b-45c7-944a-e11a4d385cea 08/24/23 12:05:18.314 + [AfterEach] [sig-node] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:01:05.910: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] StatefulSet + Aug 24 12:05:18.318: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-node] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-node] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "statefulset-2561" for this suite. 07/29/23 16:01:05.918 + STEP: Destroying namespace "configmap-5753" for this suite. 08/24/23 12:05:18.327 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSS ------------------------------ -[sig-node] Container Runtime blackbox test when starting a container that exits - should run with the expected status [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:52 -[BeforeEach] [sig-node] Container Runtime +[sig-apps] Deployment + should run the lifecycle of a Deployment [Conformance] + test/e2e/apps/deployment.go:185 +[BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:01:05.93 -Jul 29 16:01:05.930: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-runtime 07/29/23 16:01:05.932 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:05.988 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:01:05.995 -[BeforeEach] [sig-node] Container Runtime +STEP: Creating a kubernetes client 08/24/23 12:05:18.34 +Aug 24 12:05:18.340: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename deployment 08/24/23 12:05:18.342 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:18.38 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:18.386 +[BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 -[It] should run with the expected status [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:52 -STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' 07/29/23 16:01:06.018 -STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' 07/29/23 16:01:24.214 -STEP: Container 
'terminate-cmd-rpa': should get the expected 'Ready' condition 07/29/23 16:01:24.223 -STEP: Container 'terminate-cmd-rpa': should get the expected 'State' 07/29/23 16:01:24.235 -STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] 07/29/23 16:01:24.235 -STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' 07/29/23 16:01:24.327 -STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' 07/29/23 16:01:27.378 -STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition 07/29/23 16:01:29.403 -STEP: Container 'terminate-cmd-rpof': should get the expected 'State' 07/29/23 16:01:29.415 -STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] 07/29/23 16:01:29.415 -STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' 07/29/23 16:01:29.456 -STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' 07/29/23 16:01:30.474 -STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition 07/29/23 16:01:33.502 -STEP: Container 'terminate-cmd-rpn': should get the expected 'State' 07/29/23 16:01:33.513 -STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] 07/29/23 16:01:33.513 -[AfterEach] [sig-node] Container Runtime +[BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 +[It] should run the lifecycle of a Deployment [Conformance] + test/e2e/apps/deployment.go:185 +STEP: creating a Deployment 08/24/23 12:05:18.416 +STEP: waiting for Deployment to be created 08/24/23 12:05:18.427 +STEP: waiting for all Replicas to be Ready 08/24/23 12:05:18.429 +Aug 24 12:05:18.432: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Aug 24 12:05:18.432: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Aug 24 12:05:18.467: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Aug 24 12:05:18.467: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Aug 24 12:05:18.511: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Aug 24 12:05:18.511: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Aug 24 12:05:18.585: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Aug 24 12:05:18.585: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Aug 24 12:05:19.517: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment-static:true] +Aug 24 12:05:19.517: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment-static:true] +Aug 24 12:05:20.578: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 and labels map[test-deployment-static:true] +STEP: patching the Deployment 08/24/23 12:05:20.578 +W0824 12:05:20.594981 14 warnings.go:70] unknown field 
"spec.template.spec.TerminationGracePeriodSeconds" +Aug 24 12:05:20.598: INFO: observed event type ADDED +STEP: waiting for Replicas to scale 08/24/23 12:05:20.599 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:20.605: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:20.605: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:20.605: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:20.605: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:20.629: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:20.629: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:20.678: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:20.678: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:20.704: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:20.704: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:20.717: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:20.717: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:22.538: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:22.538: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:22.625: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +STEP: listing Deployments 08/24/23 12:05:22.625 +Aug 24 12:05:22.634: INFO: Found test-deployment with labels: map[test-deployment:patched test-deployment-static:true] +STEP: updating the Deployment 08/24/23 12:05:22.634 +Aug 24 12:05:22.693: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +STEP: fetching the DeploymentStatus 08/24/23 12:05:22.693 +Aug 24 12:05:22.725: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels 
map[test-deployment:updated test-deployment-static:true] +Aug 24 12:05:22.746: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Aug 24 12:05:22.858: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Aug 24 12:05:22.938: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Aug 24 12:05:22.990: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Aug 24 12:05:24.551: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] +Aug 24 12:05:24.608: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] +Aug 24 12:05:24.664: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] +Aug 24 12:05:25.766: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 3 and labels map[test-deployment:updated test-deployment-static:true] +STEP: patching the DeploymentStatus 08/24/23 12:05:25.906 +STEP: fetching the DeploymentStatus 08/24/23 12:05:25.921 +Aug 24 12:05:25.935: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:25.936: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:25.936: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:25.937: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:25.937: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 +Aug 24 12:05:25.937: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:25.938: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:25.938: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 +Aug 24 12:05:25.938: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 3 +STEP: deleting the Deployment 08/24/23 12:05:25.938 +Aug 24 12:05:25.960: INFO: observed event type MODIFIED +Aug 24 12:05:25.960: INFO: observed event type MODIFIED +Aug 24 12:05:25.961: INFO: observed event type MODIFIED +Aug 24 12:05:25.961: INFO: observed event type MODIFIED +Aug 24 12:05:25.963: INFO: observed event type MODIFIED +Aug 24 12:05:25.963: INFO: observed event type MODIFIED +Aug 24 12:05:25.963: INFO: observed event type MODIFIED +Aug 24 12:05:25.963: INFO: observed event type MODIFIED +Aug 24 12:05:25.964: INFO: observed event type MODIFIED +Aug 24 12:05:25.964: INFO: observed event type MODIFIED +[AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 +Aug 24 12:05:25.971: INFO: Log out all the ReplicaSets if there is no deployment created +Aug 24 12:05:25.976: INFO: ReplicaSet "test-deployment-7b7876f9d6": 
+&ReplicaSet{ObjectMeta:{test-deployment-7b7876f9d6 deployment-4924 4ec7a836-a017-419a-86fd-f2f85670194c 12017 2 2023-08-24 12:05:22 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:3] [{apps/v1 Deployment test-deployment ebda576a-4732-4827-bc12-b32ffb9fb026 0xc003e79c27 0xc003e79c28}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:24 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ebda576a-4732-4827-bc12-b32ffb9fb026\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:05:25 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 7b7876f9d6,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003e79cb0 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:2,FullyLabeledReplicas:2,ObservedGeneration:2,ReadyReplicas:2,AvailableReplicas:2,Conditions:[]ReplicaSetCondition{},},} + +Aug 24 12:05:25.986: INFO: pod: "test-deployment-7b7876f9d6-tn8g4": +&Pod{ObjectMeta:{test-deployment-7b7876f9d6-tn8g4 test-deployment-7b7876f9d6- deployment-4924 89ae3ae1-cebc-4eed-8440-6dbfd470fd0b 12015 0 2023-08-24 12:05:24 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7b7876f9d6 4ec7a836-a017-419a-86fd-f2f85670194c 0xc003fdc587 0xc003fdc588}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:24 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4ec7a836-a017-419a-86fd-f2f85670194c\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:25 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.20\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-tjlxn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tjlxn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,De
precatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:25 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:25 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:10.233.64.20,StartTime:2023-08-24 12:05:24 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:25 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://84c815d43f419769d71787f4db41ecbf3542c94ee93daf75cec6bb7dc92aacbf,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.20,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + +Aug 24 12:05:25.987: INFO: pod: "test-deployment-7b7876f9d6-zvs52": +&Pod{ObjectMeta:{test-deployment-7b7876f9d6-zvs52 test-deployment-7b7876f9d6- deployment-4924 d67c5712-5ee1-4638-95b4-1485380e87c3 11986 0 2023-08-24 12:05:22 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7b7876f9d6 4ec7a836-a017-419a-86fd-f2f85670194c 0xc003fdc777 0xc003fdc778}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4ec7a836-a017-419a-86fd-f2f85670194c\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:24 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.245\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-r8mh6,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r8mh6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,D
eprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.245,StartTime:2023-08-24 12:05:22 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:23 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://0057bcf04dd54e84e4300442876ccc675b80d8f822f60600471aaaa8fb3b6f4e,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.245,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + +Aug 24 12:05:25.987: INFO: ReplicaSet "test-deployment-7df74c55ff": +&ReplicaSet{ObjectMeta:{test-deployment-7df74c55ff deployment-4924 1103d062-3854-4a2c-be90-c1085d9a1768 12026 4 2023-08-24 12:05:20 +0000 UTC map[pod-template-hash:7df74c55ff test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-deployment ebda576a-4732-4827-bc12-b32ffb9fb026 0xc003e79d17 0xc003e79d18}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:25 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ebda576a-4732-4827-bc12-b32ffb9fb026\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:05:25 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 7df74c55ff,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/pause:3.9 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003e79da0 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:4,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + +Aug 24 12:05:25.999: INFO: pod: "test-deployment-7df74c55ff-gtb68": +&Pod{ObjectMeta:{test-deployment-7df74c55ff-gtb68 test-deployment-7df74c55ff- deployment-4924 f939e517-649e-493d-ae66-dfd747d0a5c0 11995 0 2023-08-24 12:05:22 +0000 UTC 2023-08-24 12:05:25 +0000 UTC 0xc003fddb28 map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7df74c55ff 1103d062-3854-4a2c-be90-c1085d9a1768 0xc003fddb57 0xc003fddb58}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1103d062-3854-4a2c-be90-c1085d9a1768\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:24 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.141\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-rfg5q,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/pause:3.9,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rfg5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kub
ernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:10.233.65.141,StartTime:2023-08-24 12:05:22 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:23 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/pause:3.9,ImageID:registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097,ContainerID:cri-o://1d6491adadd3414d5a23904e52d31104fec7b0ff2da6bb960008354d831b7bda,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.141,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + +Aug 24 12:05:26.000: INFO: pod: "test-deployment-7df74c55ff-p666g": +&Pod{ObjectMeta:{test-deployment-7df74c55ff-p666g test-deployment-7df74c55ff- deployment-4924 a347370d-96b3-416c-9947-aa26244b1bb2 12021 0 2023-08-24 12:05:20 +0000 UTC 2023-08-24 12:05:26 +0000 UTC 0xc003fddd20 map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7df74c55ff 1103d062-3854-4a2c-be90-c1085d9a1768 0xc003fddd57 0xc003fddd58}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:20 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1103d062-3854-4a2c-be90-c1085d9a1768\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.143\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-2bpx8,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/pause:3.9,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2bpx8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kub
ernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:20 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:20 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.143,StartTime:2023-08-24 12:05:20 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:21 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/pause:3.9,ImageID:registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097,ContainerID:cri-o://b9726124fbcfd7dbba7b79569ad13a366c60acf553c1a6393909223a2e2361ef,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.143,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + +Aug 24 12:05:26.000: INFO: ReplicaSet "test-deployment-f4dbc4647": +&ReplicaSet{ObjectMeta:{test-deployment-f4dbc4647 deployment-4924 ab5f1146-d6bc-444e-9957-780b45019236 11924 3 2023-08-24 12:05:18 +0000 UTC map[pod-template-hash:f4dbc4647 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment ebda576a-4732-4827-bc12-b32ffb9fb026 0xc003e79e07 0xc003e79e08}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ebda576a-4732-4827-bc12-b32ffb9fb026\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 
{"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: f4dbc4647,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:f4dbc4647 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003e79e90 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:3,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + +[AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 -Jul 29 16:01:33.554: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Container Runtime +Aug 24 12:05:26.008: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Container Runtime +[DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Container Runtime +[DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 -STEP: Destroying namespace "container-runtime-6968" for this suite. 07/29/23 16:01:33.562 +STEP: Destroying namespace "deployment-4924" for this suite. 
08/24/23 12:05:26.024 ------------------------------ -• [SLOW TEST] [27.647 seconds] -[sig-node] Container Runtime -test/e2e/common/node/framework.go:23 - blackbox test - test/e2e/common/node/runtime.go:44 - when starting a container that exits - test/e2e/common/node/runtime.go:45 - should run with the expected status [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:52 +• [SLOW TEST] [7.695 seconds] +[sig-apps] Deployment +test/e2e/apps/framework.go:23 + should run the lifecycle of a Deployment [Conformance] + test/e2e/apps/deployment.go:185 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Container Runtime + [BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:01:05.93 - Jul 29 16:01:05.930: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-runtime 07/29/23 16:01:05.932 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:05.988 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:01:05.995 - [BeforeEach] [sig-node] Container Runtime + STEP: Creating a kubernetes client 08/24/23 12:05:18.34 + Aug 24 12:05:18.340: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename deployment 08/24/23 12:05:18.342 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:18.38 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:18.386 + [BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 - [It] should run with the expected status [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:52 - STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' 07/29/23 16:01:06.018 - STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' 07/29/23 16:01:24.214 - STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition 07/29/23 16:01:24.223 - STEP: Container 'terminate-cmd-rpa': should get the expected 'State' 07/29/23 16:01:24.235 - STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] 07/29/23 16:01:24.235 - STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' 07/29/23 16:01:24.327 - STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' 07/29/23 16:01:27.378 - STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition 07/29/23 16:01:29.403 - STEP: Container 'terminate-cmd-rpof': should get the expected 'State' 07/29/23 16:01:29.415 - STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] 07/29/23 16:01:29.415 - STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' 07/29/23 16:01:29.456 - STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' 07/29/23 16:01:30.474 - STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition 07/29/23 16:01:33.502 - STEP: Container 'terminate-cmd-rpn': should get the expected 'State' 07/29/23 16:01:33.513 - STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] 07/29/23 16:01:33.513 - [AfterEach] [sig-node] Container Runtime + [BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 + [It] should run the lifecycle of a Deployment [Conformance] + test/e2e/apps/deployment.go:185 + STEP: creating a Deployment 08/24/23 12:05:18.416 + STEP: waiting for Deployment 
to be created 08/24/23 12:05:18.427 + STEP: waiting for all Replicas to be Ready 08/24/23 12:05:18.429 + Aug 24 12:05:18.432: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] + Aug 24 12:05:18.432: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] + Aug 24 12:05:18.467: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] + Aug 24 12:05:18.467: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] + Aug 24 12:05:18.511: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] + Aug 24 12:05:18.511: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] + Aug 24 12:05:18.585: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] + Aug 24 12:05:18.585: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 and labels map[test-deployment-static:true] + Aug 24 12:05:19.517: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment-static:true] + Aug 24 12:05:19.517: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment-static:true] + Aug 24 12:05:20.578: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 and labels map[test-deployment-static:true] + STEP: patching the Deployment 08/24/23 12:05:20.578 + W0824 12:05:20.594981 14 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" + Aug 24 12:05:20.598: INFO: observed event type ADDED + STEP: waiting for Replicas to scale 08/24/23 12:05:20.599 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 0 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:20.604: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:20.605: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:20.605: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:20.605: INFO: observed 
Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:20.605: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:20.629: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:20.629: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:20.678: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:20.678: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:20.704: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:20.704: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:20.717: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:20.717: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:22.538: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:22.538: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:22.625: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + STEP: listing Deployments 08/24/23 12:05:22.625 + Aug 24 12:05:22.634: INFO: Found test-deployment with labels: map[test-deployment:patched test-deployment-static:true] + STEP: updating the Deployment 08/24/23 12:05:22.634 + Aug 24 12:05:22.693: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + STEP: fetching the DeploymentStatus 08/24/23 12:05:22.693 + Aug 24 12:05:22.725: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] + Aug 24 12:05:22.746: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] + Aug 24 12:05:22.858: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] + Aug 24 12:05:22.938: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] + Aug 24 12:05:22.990: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] + Aug 24 12:05:24.551: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] + Aug 24 12:05:24.608: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] + Aug 24 12:05:24.664: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] + Aug 24 12:05:25.766: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 3 and labels map[test-deployment:updated test-deployment-static:true] + STEP: patching the DeploymentStatus 08/24/23 
12:05:25.906 + STEP: fetching the DeploymentStatus 08/24/23 12:05:25.921 + Aug 24 12:05:25.935: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:25.936: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:25.936: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:25.937: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:25.937: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 1 + Aug 24 12:05:25.937: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:25.938: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:25.938: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 2 + Aug 24 12:05:25.938: INFO: observed Deployment test-deployment in namespace deployment-4924 with ReadyReplicas 3 + STEP: deleting the Deployment 08/24/23 12:05:25.938 + Aug 24 12:05:25.960: INFO: observed event type MODIFIED + Aug 24 12:05:25.960: INFO: observed event type MODIFIED + Aug 24 12:05:25.961: INFO: observed event type MODIFIED + Aug 24 12:05:25.961: INFO: observed event type MODIFIED + Aug 24 12:05:25.963: INFO: observed event type MODIFIED + Aug 24 12:05:25.963: INFO: observed event type MODIFIED + Aug 24 12:05:25.963: INFO: observed event type MODIFIED + Aug 24 12:05:25.963: INFO: observed event type MODIFIED + Aug 24 12:05:25.964: INFO: observed event type MODIFIED + Aug 24 12:05:25.964: INFO: observed event type MODIFIED + [AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 + Aug 24 12:05:25.971: INFO: Log out all the ReplicaSets if there is no deployment created + Aug 24 12:05:25.976: INFO: ReplicaSet "test-deployment-7b7876f9d6": + &ReplicaSet{ObjectMeta:{test-deployment-7b7876f9d6 deployment-4924 4ec7a836-a017-419a-86fd-f2f85670194c 12017 2 2023-08-24 12:05:22 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:3] [{apps/v1 Deployment test-deployment ebda576a-4732-4827-bc12-b32ffb9fb026 0xc003e79c27 0xc003e79c28}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:24 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ebda576a-4732-4827-bc12-b32ffb9fb026\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:05:25 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} 
status}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 7b7876f9d6,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003e79cb0 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:2,FullyLabeledReplicas:2,ObservedGeneration:2,ReadyReplicas:2,AvailableReplicas:2,Conditions:[]ReplicaSetCondition{},},} + + Aug 24 12:05:25.986: INFO: pod: "test-deployment-7b7876f9d6-tn8g4": + &Pod{ObjectMeta:{test-deployment-7b7876f9d6-tn8g4 test-deployment-7b7876f9d6- deployment-4924 89ae3ae1-cebc-4eed-8440-6dbfd470fd0b 12015 0 2023-08-24 12:05:24 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7b7876f9d6 4ec7a836-a017-419a-86fd-f2f85670194c 0xc003fdc587 0xc003fdc588}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:24 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4ec7a836-a017-419a-86fd-f2f85670194c\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:25 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.20\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-tjlxn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tjlxn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedu
lingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:25 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:25 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.127,PodIP:10.233.64.20,StartTime:2023-08-24 12:05:24 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:25 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://84c815d43f419769d71787f4db41ecbf3542c94ee93daf75cec6bb7dc92aacbf,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.20,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + + Aug 24 12:05:25.987: INFO: pod: "test-deployment-7b7876f9d6-zvs52": + &Pod{ObjectMeta:{test-deployment-7b7876f9d6-zvs52 test-deployment-7b7876f9d6- deployment-4924 d67c5712-5ee1-4638-95b4-1485380e87c3 11986 0 2023-08-24 12:05:22 +0000 UTC map[pod-template-hash:7b7876f9d6 test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7b7876f9d6 4ec7a836-a017-419a-86fd-f2f85670194c 0xc003fdc777 0xc003fdc778}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4ec7a836-a017-419a-86fd-f2f85670194c\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:24 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.245\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-r8mh6,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r8mh6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedu
lingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.245,StartTime:2023-08-24 12:05:22 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:23 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://0057bcf04dd54e84e4300442876ccc675b80d8f822f60600471aaaa8fb3b6f4e,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.245,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + + Aug 24 12:05:25.987: INFO: ReplicaSet "test-deployment-7df74c55ff": + &ReplicaSet{ObjectMeta:{test-deployment-7df74c55ff deployment-4924 1103d062-3854-4a2c-be90-c1085d9a1768 12026 4 2023-08-24 12:05:20 +0000 UTC map[pod-template-hash:7df74c55ff test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-deployment ebda576a-4732-4827-bc12-b32ffb9fb026 0xc003e79d17 0xc003e79d18}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:25 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ebda576a-4732-4827-bc12-b32ffb9fb026\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:05:25 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 7df74c55ff,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/pause:3.9 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil 
nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003e79da0 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:4,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + + Aug 24 12:05:25.999: INFO: pod: "test-deployment-7df74c55ff-gtb68": + &Pod{ObjectMeta:{test-deployment-7df74c55ff-gtb68 test-deployment-7df74c55ff- deployment-4924 f939e517-649e-493d-ae66-dfd747d0a5c0 11995 0 2023-08-24 12:05:22 +0000 UTC 2023-08-24 12:05:25 +0000 UTC 0xc003fddb28 map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7df74c55ff 1103d062-3854-4a2c-be90-c1085d9a1768 0xc003fddb57 0xc003fddb58}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1103d062-3854-4a2c-be90-c1085d9a1768\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:24 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.141\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-rfg5q,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/pause:3.9,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rfg5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceCl
aims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:24 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.111,PodIP:10.233.65.141,StartTime:2023-08-24 12:05:22 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:23 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/pause:3.9,ImageID:registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097,ContainerID:cri-o://1d6491adadd3414d5a23904e52d31104fec7b0ff2da6bb960008354d831b7bda,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.141,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + + Aug 24 12:05:26.000: INFO: pod: "test-deployment-7df74c55ff-p666g": + &Pod{ObjectMeta:{test-deployment-7df74c55ff-p666g test-deployment-7df74c55ff- deployment-4924 a347370d-96b3-416c-9947-aa26244b1bb2 12021 0 2023-08-24 12:05:20 +0000 UTC 2023-08-24 12:05:26 +0000 UTC 0xc003fddd20 map[pod-template-hash:7df74c55ff test-deployment-static:true] map[] [{apps/v1 ReplicaSet test-deployment-7df74c55ff 1103d062-3854-4a2c-be90-c1085d9a1768 0xc003fddd57 0xc003fddd58}] [] [{kube-controller-manager Update v1 2023-08-24 12:05:20 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1103d062-3854-4a2c-be90-c1085d9a1768\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.143\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-2bpx8,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:registry.k8s.io/pause:3.9,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2bpx8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceCl
aims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:20 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:22 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:05:20 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.143,StartTime:2023-08-24 12:05:20 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:05:21 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/pause:3.9,ImageID:registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097,ContainerID:cri-o://b9726124fbcfd7dbba7b79569ad13a366c60acf553c1a6393909223a2e2361ef,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.143,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + + Aug 24 12:05:26.000: INFO: ReplicaSet "test-deployment-f4dbc4647": + &ReplicaSet{ObjectMeta:{test-deployment-f4dbc4647 deployment-4924 ab5f1146-d6bc-444e-9957-780b45019236 11924 3 2023-08-24 12:05:18 +0000 UTC map[pod-template-hash:f4dbc4647 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment ebda576a-4732-4827-bc12-b32ffb9fb026 0xc003e79e07 0xc003e79e08}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ebda576a-4732-4827-bc12-b32ffb9fb026\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:05:22 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: f4dbc4647,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:f4dbc4647 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent 
SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003e79e90 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:3,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + + [AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 - Jul 29 16:01:33.554: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Container Runtime + Aug 24 12:05:26.008: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Container Runtime + [DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Container Runtime + [DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 - STEP: Destroying namespace "container-runtime-6968" for this suite. 07/29/23 16:01:33.562 + STEP: Destroying namespace "deployment-4924" for this suite. 08/24/23 12:05:26.024 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSS ------------------------------ -[sig-network] Networking Granular Checks: Pods - should function for intra-pod communication: udp [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:93 -[BeforeEach] [sig-network] Networking +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] + test/e2e/apimachinery/webhook.go:277 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:01:33.58 -Jul 29 16:01:33.580: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pod-network-test 07/29/23 16:01:33.587 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:33.629 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:01:33.634 -[BeforeEach] [sig-network] Networking +STEP: Creating a kubernetes client 08/24/23 12:05:26.04 +Aug 24 12:05:26.040: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 12:05:26.043 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:26.075 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:26.08 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should function for intra-pod communication: udp [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:93 -STEP: Performing setup for networking test in namespace pod-network-test-1723 07/29/23 16:01:33.639 -STEP: creating a selector 07/29/23 16:01:33.639 -STEP: Creating the service pods in kubernetes 07/29/23 16:01:33.64 -Jul 29 
16:01:33.640: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable -Jul 29 16:01:33.698: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-1723" to be "running and ready" -Jul 29 16:01:33.706: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 7.689884ms -Jul 29 16:01:33.706: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:01:35.715: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.016537148s -Jul 29 16:01:35.715: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:01:37.714: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.015669023s -Jul 29 16:01:37.714: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:01:39.716: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.017739155s -Jul 29 16:01:39.716: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:01:41.713: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.014791211s -Jul 29 16:01:41.713: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:01:43.714: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.015493688s -Jul 29 16:01:43.715: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:01:45.714: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 12.0152709s -Jul 29 16:01:45.714: INFO: The phase of Pod netserver-0 is Running (Ready = true) -Jul 29 16:01:45.714: INFO: Pod "netserver-0" satisfied condition "running and ready" -Jul 29 16:01:45.721: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-1723" to be "running and ready" -Jul 29 16:01:45.729: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 7.523769ms -Jul 29 16:01:45.729: INFO: The phase of Pod netserver-1 is Running (Ready = true) -Jul 29 16:01:45.729: INFO: Pod "netserver-1" satisfied condition "running and ready" -Jul 29 16:01:45.736: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-1723" to be "running and ready" -Jul 29 16:01:45.744: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 7.330736ms -Jul 29 16:01:45.744: INFO: The phase of Pod netserver-2 is Running (Ready = true) -Jul 29 16:01:45.745: INFO: Pod "netserver-2" satisfied condition "running and ready" -STEP: Creating test pods 07/29/23 16:01:45.753 -Jul 29 16:01:45.763: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-1723" to be "running" -Jul 29 16:01:45.770: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.39243ms -Jul 29 16:01:47.775: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.011759436s -Jul 29 16:01:47.775: INFO: Pod "test-container-pod" satisfied condition "running" -Jul 29 16:01:47.781: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 -Jul 29 16:01:47.782: INFO: Breadth first check of 10.233.64.144 on host 192.168.121.120... 
-Jul 29 16:01:47.788: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.206:9080/dial?request=hostname&protocol=udp&host=10.233.64.144&port=8081&tries=1'] Namespace:pod-network-test-1723 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:01:47.788: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:01:47.791: INFO: ExecWithOptions: Clientset creation -Jul 29 16:01:47.792: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-1723/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.206%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.64.144%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) -Jul 29 16:01:47.970: INFO: Waiting for responses: map[] -Jul 29 16:01:47.970: INFO: reached 10.233.64.144 after 0/1 tries -Jul 29 16:01:47.970: INFO: Breadth first check of 10.233.65.171 on host 192.168.121.211... -Jul 29 16:01:47.978: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.206:9080/dial?request=hostname&protocol=udp&host=10.233.65.171&port=8081&tries=1'] Namespace:pod-network-test-1723 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:01:47.978: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:01:47.979: INFO: ExecWithOptions: Clientset creation -Jul 29 16:01:47.979: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-1723/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.206%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.65.171%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) -Jul 29 16:01:48.107: INFO: Waiting for responses: map[] -Jul 29 16:01:48.107: INFO: reached 10.233.65.171 after 0/1 tries -Jul 29 16:01:48.107: INFO: Breadth first check of 10.233.66.137 on host 192.168.121.141... -Jul 29 16:01:48.114: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.206:9080/dial?request=hostname&protocol=udp&host=10.233.66.137&port=8081&tries=1'] Namespace:pod-network-test-1723 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:01:48.114: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:01:48.116: INFO: ExecWithOptions: Clientset creation -Jul 29 16:01:48.116: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-1723/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.206%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.66.137%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) -Jul 29 16:01:48.212: INFO: Waiting for responses: map[] -Jul 29 16:01:48.213: INFO: reached 10.233.66.137 after 0/1 tries -Jul 29 16:01:48.213: INFO: Going to retry 0 out of 3 pods.... 
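
The dial-via-exec pattern recorded above is straightforward to reproduce outside the suite. Below is a minimal client-go sketch, not the suite's own implementation: it issues the same exec + curl probe against the netserver's /dial endpoint. The kubeconfig path is assumed, and the namespace, pod, container, and IP:port values are placeholders copied from this particular run.

    // Sketch only: same shape as the ExecWithOptions entries in the log.
    // Assumed values: kubeconfig path, namespace, pod, container, IP:port.
    package main

    import (
    	"bytes"
    	"context"
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/kubernetes/scheme"
    	"k8s.io/client-go/tools/clientcmd"
    	"k8s.io/client-go/tools/remotecommand"
    )

    func main() {
    	// The e2e run used a generated /tmp kubeconfig; any valid path works.
    	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}

    	// The same curl the suite runs inside the test pod.
    	cmd := []string{"/bin/sh", "-c",
    		"curl -g -q -s 'http://10.233.66.206:9080/dial?request=hostname&protocol=udp&host=10.233.64.144&port=8081&tries=1'"}

    	// POST to the pod's exec subresource, as the log's execute(POST ...) line shows.
    	req := cs.CoreV1().RESTClient().Post().
    		Resource("pods").
    		Namespace("pod-network-test-1723").
    		Name("test-container-pod").
    		SubResource("exec").
    		VersionedParams(&corev1.PodExecOptions{
    			Container: "webserver",
    			Command:   cmd,
    			Stdout:    true,
    			Stderr:    true,
    		}, scheme.ParameterCodec)

    	exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
    	if err != nil {
    		panic(err)
    	}
    	var stdout, stderr bytes.Buffer
    	if err := exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
    		Stdout: &stdout, Stderr: &stderr,
    	}); err != nil {
    		panic(err)
    	}
    	fmt.Println(stdout.String())
    }

The /dial endpoint replies with the hostname of the pod it reached (request=hostname in the query string), which is how the suite decides it "reached 10.233.64.144 after 0/1 tries".
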
-[AfterEach] [sig-network] Networking +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 12:05:26.128 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:05:27.575 +STEP: Deploying the webhook pod 08/24/23 12:05:27.589 +STEP: Wait for the deployment to be ready 08/24/23 12:05:27.607 +Aug 24 12:05:27.620: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service 08/24/23 12:05:29.656 +STEP: Verifying the service has paired with the endpoint 08/24/23 12:05:29.685 +Aug 24 12:05:30.686: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] + test/e2e/apimachinery/webhook.go:277 +STEP: Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API 08/24/23 12:05:30.693 +STEP: Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API 08/24/23 12:05:30.724 +STEP: Creating a dummy validating-webhook-configuration object 08/24/23 12:05:30.75 +STEP: Deleting the validating-webhook-configuration, which should be possible to remove 08/24/23 12:05:30.765 +STEP: Creating a dummy mutating-webhook-configuration object 08/24/23 12:05:30.778 +STEP: Deleting the mutating-webhook-configuration, which should be possible to remove 08/24/23 12:05:30.792 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:01:48.213: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Networking +Aug 24 12:05:30.828: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Networking +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Networking +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "pod-network-test-1723" for this suite. 07/29/23 16:01:48.223 +STEP: Destroying namespace "webhook-5068" for this suite. 08/24/23 12:05:30.919 +STEP: Destroying namespace "webhook-5068-markers" for this suite. 
08/24/23 12:05:30.948 ------------------------------ -• [SLOW TEST] [14.662 seconds] -[sig-network] Networking -test/e2e/common/network/framework.go:23 - Granular Checks: Pods - test/e2e/common/network/networking.go:32 - should function for intra-pod communication: udp [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:93 +• [4.932 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] + test/e2e/apimachinery/webhook.go:277 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Networking + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:01:33.58 - Jul 29 16:01:33.580: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pod-network-test 07/29/23 16:01:33.587 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:33.629 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:01:33.634 - [BeforeEach] [sig-network] Networking + STEP: Creating a kubernetes client 08/24/23 12:05:26.04 + Aug 24 12:05:26.040: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 12:05:26.043 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:26.075 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:26.08 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should function for intra-pod communication: udp [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:93 - STEP: Performing setup for networking test in namespace pod-network-test-1723 07/29/23 16:01:33.639 - STEP: creating a selector 07/29/23 16:01:33.639 - STEP: Creating the service pods in kubernetes 07/29/23 16:01:33.64 - Jul 29 16:01:33.640: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable - Jul 29 16:01:33.698: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-1723" to be "running and ready" - Jul 29 16:01:33.706: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 7.689884ms - Jul 29 16:01:33.706: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:01:35.715: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.016537148s - Jul 29 16:01:35.715: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:01:37.714: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.015669023s - Jul 29 16:01:37.714: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:01:39.716: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.017739155s - Jul 29 16:01:39.716: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:01:41.713: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.014791211s - Jul 29 16:01:41.713: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:01:43.714: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 10.015493688s - Jul 29 16:01:43.715: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:01:45.714: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 12.0152709s - Jul 29 16:01:45.714: INFO: The phase of Pod netserver-0 is Running (Ready = true) - Jul 29 16:01:45.714: INFO: Pod "netserver-0" satisfied condition "running and ready" - Jul 29 16:01:45.721: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-1723" to be "running and ready" - Jul 29 16:01:45.729: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 7.523769ms - Jul 29 16:01:45.729: INFO: The phase of Pod netserver-1 is Running (Ready = true) - Jul 29 16:01:45.729: INFO: Pod "netserver-1" satisfied condition "running and ready" - Jul 29 16:01:45.736: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-1723" to be "running and ready" - Jul 29 16:01:45.744: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 7.330736ms - Jul 29 16:01:45.744: INFO: The phase of Pod netserver-2 is Running (Ready = true) - Jul 29 16:01:45.745: INFO: Pod "netserver-2" satisfied condition "running and ready" - STEP: Creating test pods 07/29/23 16:01:45.753 - Jul 29 16:01:45.763: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-1723" to be "running" - Jul 29 16:01:45.770: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.39243ms - Jul 29 16:01:47.775: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.011759436s - Jul 29 16:01:47.775: INFO: Pod "test-container-pod" satisfied condition "running" - Jul 29 16:01:47.781: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 - Jul 29 16:01:47.782: INFO: Breadth first check of 10.233.64.144 on host 192.168.121.120... - Jul 29 16:01:47.788: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.206:9080/dial?request=hostname&protocol=udp&host=10.233.64.144&port=8081&tries=1'] Namespace:pod-network-test-1723 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:01:47.788: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:01:47.791: INFO: ExecWithOptions: Clientset creation - Jul 29 16:01:47.792: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-1723/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.206%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.64.144%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) - Jul 29 16:01:47.970: INFO: Waiting for responses: map[] - Jul 29 16:01:47.970: INFO: reached 10.233.64.144 after 0/1 tries - Jul 29 16:01:47.970: INFO: Breadth first check of 10.233.65.171 on host 192.168.121.211... 
- Jul 29 16:01:47.978: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.206:9080/dial?request=hostname&protocol=udp&host=10.233.65.171&port=8081&tries=1'] Namespace:pod-network-test-1723 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:01:47.978: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:01:47.979: INFO: ExecWithOptions: Clientset creation - Jul 29 16:01:47.979: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-1723/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.206%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.65.171%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) - Jul 29 16:01:48.107: INFO: Waiting for responses: map[] - Jul 29 16:01:48.107: INFO: reached 10.233.65.171 after 0/1 tries - Jul 29 16:01:48.107: INFO: Breadth first check of 10.233.66.137 on host 192.168.121.141... - Jul 29 16:01:48.114: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.206:9080/dial?request=hostname&protocol=udp&host=10.233.66.137&port=8081&tries=1'] Namespace:pod-network-test-1723 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:01:48.114: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:01:48.116: INFO: ExecWithOptions: Clientset creation - Jul 29 16:01:48.116: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-1723/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.206%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.66.137%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) - Jul 29 16:01:48.212: INFO: Waiting for responses: map[] - Jul 29 16:01:48.213: INFO: reached 10.233.66.137 after 0/1 tries - Jul 29 16:01:48.213: INFO: Going to retry 0 out of 3 pods.... 
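
The "running and ready" waits captured above follow a simple poll: fetch the pod, check its Phase and Ready condition, sleep, and repeat until a deadline. A minimal sketch of that loop, assuming a local kubeconfig and reusing names from this run:

    // Sketch only: the poll behind "Waiting up to 5m0s for pod ... to be
    // running and ready". Kubeconfig path, namespace, and pod name assumed.
    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    // podReady reports whether the pod's Ready condition is True.
    func podReady(p *corev1.Pod) bool {
    	for _, c := range p.Status.Conditions {
    		if c.Type == corev1.PodReady {
    			return c.Status == corev1.ConditionTrue
    		}
    	}
    	return false
    }

    func main() {
    	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // assumed path
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}

    	ns, name := "pod-network-test-1723", "netserver-0" // names from the log
    	deadline := time.Now().Add(5 * time.Minute)        // matches the 5m0s wait
    	for time.Now().Before(deadline) {
    		pod, err := cs.CoreV1().Pods(ns).Get(context.Background(), name, metav1.GetOptions{})
    		if err == nil && pod.Status.Phase == corev1.PodRunning && podReady(pod) {
    			fmt.Printf("pod %q satisfied condition \"running and ready\"\n", name)
    			return
    		}
    		time.Sleep(2 * time.Second) // roughly the ~2s cadence visible in the timestamps
    	}
    	panic("timed out waiting for pod to be running and ready")
    }

This mirrors the log's progression from Phase="Pending" through Running (Ready = false) to Running (Ready = true) before the test proceeds.
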
- [AfterEach] [sig-network] Networking + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 12:05:26.128 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:05:27.575 + STEP: Deploying the webhook pod 08/24/23 12:05:27.589 + STEP: Wait for the deployment to be ready 08/24/23 12:05:27.607 + Aug 24 12:05:27.620: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set + STEP: Deploying the webhook service 08/24/23 12:05:29.656 + STEP: Verifying the service has paired with the endpoint 08/24/23 12:05:29.685 + Aug 24 12:05:30.686: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] + test/e2e/apimachinery/webhook.go:277 + STEP: Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API 08/24/23 12:05:30.693 + STEP: Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API 08/24/23 12:05:30.724 + STEP: Creating a dummy validating-webhook-configuration object 08/24/23 12:05:30.75 + STEP: Deleting the validating-webhook-configuration, which should be possible to remove 08/24/23 12:05:30.765 + STEP: Creating a dummy mutating-webhook-configuration object 08/24/23 12:05:30.778 + STEP: Deleting the mutating-webhook-configuration, which should be possible to remove 08/24/23 12:05:30.792 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:01:48.213: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Networking + Aug 24 12:05:30.828: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Networking + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Networking + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "pod-network-test-1723" for this suite. 07/29/23 16:01:48.223 + STEP: Destroying namespace "webhook-5068" for this suite. 08/24/23 12:05:30.919 + STEP: Destroying namespace "webhook-5068-markers" for this suite. 
08/24/23 12:05:30.948 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Namespaces [Serial] - should ensure that all services are removed when a namespace is deleted [Conformance] - test/e2e/apimachinery/namespace.go:251 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +[sig-storage] Downward API volume + should update annotations on modification [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:162 +[BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:01:48.244 -Jul 29 16:01:48.244: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename namespaces 07/29/23 16:01:48.249 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:48.287 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:01:48.293 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +STEP: Creating a kubernetes client 08/24/23 12:05:30.989 +Aug 24 12:05:30.989: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:05:31.01 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:31.064 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:31.076 +[BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 -[It] should ensure that all services are removed when a namespace is deleted [Conformance] - test/e2e/apimachinery/namespace.go:251 -STEP: Creating a test namespace 07/29/23 16:01:48.299 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:48.337 -STEP: Creating a service in the namespace 07/29/23 16:01:48.341 -STEP: Deleting the namespace 07/29/23 16:01:48.365 -STEP: Waiting for the namespace to be removed. 07/29/23 16:01:48.377 -STEP: Recreating the namespace 07/29/23 16:01:54.41 -STEP: Verifying there is no service in the namespace 07/29/23 16:01:54.474 -[AfterEach] [sig-api-machinery] Namespaces [Serial] +[BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 +[It] should update annotations on modification [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:162 +STEP: Creating the pod 08/24/23 12:05:31.086 +Aug 24 12:05:31.109: INFO: Waiting up to 5m0s for pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539" in namespace "downward-api-5981" to be "running and ready" +Aug 24 12:05:31.121: INFO: Pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539": Phase="Pending", Reason="", readiness=false. Elapsed: 11.513924ms +Aug 24 12:05:31.121: INFO: The phase of Pod annotationupdate7d969f22-c79c-459d-8168-2416c4e51539 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:05:33.138: INFO: Pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.027852365s
+Aug 24 12:05:33.138: INFO: The phase of Pod annotationupdate7d969f22-c79c-459d-8168-2416c4e51539 is Running (Ready = true)
+Aug 24 12:05:33.138: INFO: Pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539" satisfied condition "running and ready"
+Aug 24 12:05:33.675: INFO: Successfully updated pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539"
+[AfterEach] [sig-storage] Downward API volume
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:01:54.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+Aug 24 12:05:37.716: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-storage] Downward API volume
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+[DeferCleanup (Each)] [sig-storage] Downward API volume
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+[DeferCleanup (Each)] [sig-storage] Downward API volume
 tear down framework | framework.go:193
-STEP: Destroying namespace "namespaces-9187" for this suite. 07/29/23 16:01:54.5
-STEP: Destroying namespace "nsdeletetest-5527" for this suite. 07/29/23 16:01:54.522
-Jul 29 16:01:54.532: INFO: Namespace nsdeletetest-5527 was already deleted
-STEP: Destroying namespace "nsdeletetest-353" for this suite. 07/29/23 16:01:54.532
+STEP: Destroying namespace "downward-api-5981" for this suite. 08/24/23 12:05:37.727
------------------------------
-• [SLOW TEST] [6.301 seconds]
-[sig-api-machinery] Namespaces [Serial]
-test/e2e/apimachinery/framework.go:23
- should ensure that all services are removed when a namespace is deleted [Conformance]
- test/e2e/apimachinery/namespace.go:251
+• [SLOW TEST] [6.755 seconds]
+[sig-storage] Downward API volume
+test/e2e/common/storage/framework.go:23
+ should update annotations on modification [NodeConformance] [Conformance]
+ test/e2e/common/storage/downwardapi_volume.go:162
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-api-machinery] Namespaces [Serial]
+ [BeforeEach] [sig-storage] Downward API volume
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:01:48.244
- Jul 29 16:01:48.244: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename namespaces 07/29/23 16:01:48.249
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:48.287
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:01:48.293
- [BeforeEach] [sig-api-machinery] Namespaces [Serial]
+ STEP: Creating a kubernetes client 08/24/23 12:05:30.989
+ Aug 24 12:05:30.989: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename downward-api 08/24/23 12:05:31.01
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:31.064
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:31.076
+ [BeforeEach] [sig-storage] Downward API volume
 test/e2e/framework/metrics/init/init.go:31
- [It] should ensure that all services are removed when a namespace is deleted [Conformance]
- test/e2e/apimachinery/namespace.go:251
- STEP: Creating a test namespace 07/29/23 16:01:48.299
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:48.337
- STEP: Creating a service in the namespace 07/29/23 16:01:48.341
- STEP: Deleting the namespace 07/29/23 16:01:48.365
- STEP: Waiting for the namespace to be removed. 07/29/23 16:01:48.377
- STEP: Recreating the namespace 07/29/23 16:01:54.41
- STEP: Verifying there is no service in the namespace 07/29/23 16:01:54.474
- [AfterEach] [sig-api-machinery] Namespaces [Serial]
+ [BeforeEach] [sig-storage] Downward API volume
+ test/e2e/common/storage/downwardapi_volume.go:44
+ [It] should update annotations on modification [NodeConformance] [Conformance]
+ test/e2e/common/storage/downwardapi_volume.go:162
+ STEP: Creating the pod 08/24/23 12:05:31.086
+ Aug 24 12:05:31.109: INFO: Waiting up to 5m0s for pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539" in namespace "downward-api-5981" to be "running and ready"
+ Aug 24 12:05:31.121: INFO: Pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539": Phase="Pending", Reason="", readiness=false. Elapsed: 11.513924ms
+ Aug 24 12:05:31.121: INFO: The phase of Pod annotationupdate7d969f22-c79c-459d-8168-2416c4e51539 is Pending, waiting for it to be Running (with Ready = true)
+ Aug 24 12:05:33.138: INFO: Pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539": Phase="Running", Reason="", readiness=true. Elapsed: 2.027852365s
+ Aug 24 12:05:33.138: INFO: The phase of Pod annotationupdate7d969f22-c79c-459d-8168-2416c4e51539 is Running (Ready = true)
+ Aug 24 12:05:33.138: INFO: Pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539" satisfied condition "running and ready"
+ Aug 24 12:05:33.675: INFO: Successfully updated pod "annotationupdate7d969f22-c79c-459d-8168-2416c4e51539"
+ [AfterEach] [sig-storage] Downward API volume
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:01:54.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+ Aug 24 12:05:37.716: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] Downward API volume
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+ [DeferCleanup (Each)] [sig-storage] Downward API volume
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+ [DeferCleanup (Each)] [sig-storage] Downward API volume
 tear down framework | framework.go:193
- STEP: Destroying namespace "namespaces-9187" for this suite. 07/29/23 16:01:54.5
- STEP: Destroying namespace "nsdeletetest-5527" for this suite. 07/29/23 16:01:54.522
- Jul 29 16:01:54.532: INFO: Namespace nsdeletetest-5527 was already deleted
- STEP: Destroying namespace "nsdeletetest-353" for this suite. 07/29/23 16:01:54.532
+ STEP: Destroying namespace "downward-api-5981" for this suite. 08/24/23 12:05:37.727
 << End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSS
+SSSSSSSSSSSSSSSS
------------------------------
-[sig-scheduling] SchedulerPredicates [Serial]
- validates resource limits of pods that are allowed to run [Conformance]
- test/e2e/scheduling/predicates.go:331
-[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+[sig-node] Pods
+ should support remote command execution over websockets [NodeConformance] [Conformance]
+ test/e2e/common/node/pods.go:536
+[BeforeEach] [sig-node] Pods
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:01:54.557
-Jul 29 16:01:54.557: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename sched-pred 07/29/23 16:01:54.561
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:54.596
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:01:54.603
-[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+STEP: Creating a kubernetes client 08/24/23 12:05:37.75
+Aug 24 12:05:37.750: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename pods 08/24/23 12:05:37.752
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:37.783
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:37.789
+[BeforeEach] [sig-node] Pods
 test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
- test/e2e/scheduling/predicates.go:97
-Jul 29 16:01:54.608: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
-Jul 29 16:01:54.633: INFO: Waiting for terminating namespaces to be deleted...
-Jul 29 16:01:54.643: INFO:
-Logging pods the apiserver thinks is on node wetuj3nuajog-1 before test
-Jul 29 16:01:54.663: INFO: cilium-cdv47 from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.663: INFO: Container cilium-agent ready: true, restart count 0
-Jul 29 16:01:54.664: INFO: cilium-node-init-jdrzm from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.664: INFO: Container node-init ready: true, restart count 0
-Jul 29 16:01:54.664: INFO: coredns-787d4945fb-2xpvx from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.664: INFO: Container coredns ready: true, restart count 0
-Jul 29 16:01:54.664: INFO: coredns-787d4945fb-clg7z from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.664: INFO: Container coredns ready: true, restart count 0
-Jul 29 16:01:54.664: INFO: kube-addon-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.664: INFO: Container kube-addon-manager ready: true, restart count 0
-Jul 29 16:01:54.664: INFO: kube-apiserver-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.664: INFO: Container kube-apiserver ready: true, restart count 0
-Jul 29 16:01:54.665: INFO: kube-controller-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.665: INFO: Container kube-controller-manager ready: true, restart count 0
-Jul 29 16:01:54.665: INFO: kube-proxy-zc9m8 from kube-system started at 2023-07-29 15:13:58 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.665: INFO: Container kube-proxy ready: true, restart count 0
-Jul 29 16:01:54.665: INFO: kube-scheduler-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.665: INFO: Container kube-scheduler ready: true, restart count 0
-Jul 29 16:01:54.665: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
-Jul 29 16:01:54.665: INFO: Container sonobuoy-worker ready: true, restart count 0
-Jul 29 16:01:54.665: INFO: Container systemd-logs ready: true, restart count 0
-Jul 29 16:01:54.665: INFO:
-Logging pods the apiserver thinks is on node wetuj3nuajog-2 before test
-Jul 29 16:01:54.683: INFO: cilium-kxphw from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.684: INFO: Container cilium-agent ready: true, restart count 0
-Jul 29 16:01:54.684: INFO: cilium-node-init-fqx5t from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.684: INFO: Container node-init ready: true, restart count 0
-Jul 29 16:01:54.684: INFO: cilium-operator-8c499d9f6-hfgjd from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.684: INFO: Container cilium-operator ready: true, restart count 0
-Jul 29 16:01:54.684: INFO: kube-addon-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.684: INFO: Container kube-addon-manager ready: true, restart count 0
-Jul 29 16:01:54.684: INFO: kube-apiserver-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.684: INFO: Container kube-apiserver ready: true, restart count 0
-Jul 29 16:01:54.684: INFO: kube-controller-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.684: INFO: Container kube-controller-manager ready: true, restart count 0
-Jul 29 16:01:54.685: INFO: kube-proxy-gzqkk from kube-system started at 2023-07-29 15:14:12 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.685: INFO: Container kube-proxy ready: true, restart count 0
-Jul 29 16:01:54.685: INFO: kube-scheduler-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.685: INFO: Container kube-scheduler ready: true, restart count 0
-Jul 29 16:01:54.685: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
-Jul 29 16:01:54.685: INFO: Container sonobuoy-worker ready: true, restart count 0
-Jul 29 16:01:54.685: INFO: Container systemd-logs ready: true, restart count 0
-Jul 29 16:01:54.685: INFO:
-Logging pods the apiserver thinks is on node wetuj3nuajog-3 before test
-Jul 29 16:01:54.704: INFO: cilium-node-init-9ghzk from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.704: INFO: Container node-init ready: true, restart count 0
-Jul 29 16:01:54.704: INFO: cilium-v9c5p from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.704: INFO: Container cilium-agent ready: true, restart count 0
-Jul 29 16:01:54.704: INFO: kube-proxy-v77tx from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.704: INFO: Container kube-proxy ready: true, restart count 0
-Jul 29 16:01:54.704: INFO: netserver-2 from pod-network-test-1723 started at 2023-07-29 16:01:33 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.704: INFO: Container webserver ready: true, restart count 0
-Jul 29 16:01:54.705: INFO: test-container-pod from pod-network-test-1723 started at 2023-07-29 16:01:45 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.705: INFO: Container webserver ready: true, restart count 0
-Jul 29 16:01:54.705: INFO: sonobuoy from sonobuoy started at 2023-07-29 15:28:59 +0000 UTC (1 container statuses recorded)
-Jul 29 16:01:54.705: INFO: Container kube-sonobuoy ready: true, restart count 0
-Jul 29 16:01:54.705: INFO: sonobuoy-e2e-job-7bf00df102b6496e from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
-Jul 29 16:01:54.705: INFO: Container e2e ready: true, restart count 0
-Jul 29 16:01:54.705: INFO: Container sonobuoy-worker ready: true, restart count 0
-Jul 29 16:01:54.705: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
-Jul 29 16:01:54.705: INFO: Container sonobuoy-worker ready: true, restart count 0
-Jul 29 16:01:54.705: INFO: Container systemd-logs ready: true, restart count 0
-[It] validates resource limits of pods that are allowed to run [Conformance]
- test/e2e/scheduling/predicates.go:331
-STEP: verifying the node has the label node wetuj3nuajog-1 07/29/23 16:01:54.759
-STEP: verifying the node has the label node wetuj3nuajog-2 07/29/23 16:01:54.807
-STEP: verifying the node has the label node wetuj3nuajog-3 07/29/23 16:01:54.958
-Jul 29 16:01:55.101: INFO: Pod cilium-cdv47 requesting resource cpu=0m on Node wetuj3nuajog-1
-Jul 29 16:01:55.101: INFO: Pod cilium-kxphw requesting resource cpu=0m on Node wetuj3nuajog-2
-Jul 29 16:01:55.101: INFO: Pod cilium-node-init-9ghzk requesting resource cpu=100m on Node wetuj3nuajog-3
-Jul 29 16:01:55.101: INFO: Pod cilium-node-init-fqx5t requesting resource cpu=100m on Node wetuj3nuajog-2
-Jul 29 16:01:55.101: INFO: Pod cilium-node-init-jdrzm requesting resource cpu=100m on Node wetuj3nuajog-1
-Jul 29 16:01:55.107: INFO: Pod cilium-operator-8c499d9f6-hfgjd requesting resource cpu=0m on Node wetuj3nuajog-2
-Jul 29 16:01:55.107: INFO: Pod cilium-v9c5p requesting resource cpu=0m on Node wetuj3nuajog-3
-Jul 29 16:01:55.107: INFO: Pod coredns-787d4945fb-2xpvx requesting resource cpu=100m on Node wetuj3nuajog-1
-Jul 29 16:01:55.108: INFO: Pod coredns-787d4945fb-clg7z requesting resource cpu=100m on Node wetuj3nuajog-1
-Jul 29 16:01:55.108: INFO: Pod kube-addon-manager-wetuj3nuajog-1 requesting resource cpu=5m on Node wetuj3nuajog-1
-Jul 29 16:01:55.108: INFO: Pod kube-addon-manager-wetuj3nuajog-2 requesting resource cpu=5m on Node wetuj3nuajog-2
-Jul 29 16:01:55.108: INFO: Pod kube-apiserver-wetuj3nuajog-1 requesting resource cpu=250m on Node wetuj3nuajog-1
-Jul 29 16:01:55.108: INFO: Pod kube-apiserver-wetuj3nuajog-2 requesting resource cpu=250m on Node wetuj3nuajog-2
-Jul 29 16:01:55.108: INFO: Pod kube-controller-manager-wetuj3nuajog-1 requesting resource cpu=200m on Node wetuj3nuajog-1
-Jul 29 16:01:55.108: INFO: Pod kube-controller-manager-wetuj3nuajog-2 requesting resource cpu=200m on Node wetuj3nuajog-2
-Jul 29 16:01:55.108: INFO: Pod kube-proxy-gzqkk requesting resource cpu=0m on Node wetuj3nuajog-2
-Jul 29 16:01:55.108: INFO: Pod kube-proxy-v77tx requesting resource cpu=0m on Node wetuj3nuajog-3
-Jul 29 16:01:55.108: INFO: Pod kube-proxy-zc9m8 requesting resource cpu=0m on Node wetuj3nuajog-1
-Jul 29 16:01:55.109: INFO: Pod kube-scheduler-wetuj3nuajog-1 requesting resource cpu=100m on Node wetuj3nuajog-1
-Jul 29 16:01:55.109: INFO: Pod kube-scheduler-wetuj3nuajog-2 requesting resource cpu=100m on Node wetuj3nuajog-2
-Jul 29 16:01:55.109: INFO: Pod sonobuoy requesting resource cpu=0m on Node wetuj3nuajog-3
-Jul 29 16:01:55.109: INFO: Pod sonobuoy-e2e-job-7bf00df102b6496e requesting resource cpu=0m on Node wetuj3nuajog-3
-Jul 29 16:01:55.109: INFO: Pod sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r requesting resource cpu=0m on Node wetuj3nuajog-1
-Jul 29 16:01:55.109: INFO: Pod sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 requesting resource cpu=0m on Node wetuj3nuajog-3
-Jul 29 16:01:55.109: INFO: Pod sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 requesting resource cpu=0m on Node wetuj3nuajog-2
-STEP: Starting Pods to consume most of the cluster CPU. 07/29/23 16:01:55.109
-Jul 29 16:01:55.109: INFO: Creating a pod which consumes cpu=521m on Node wetuj3nuajog-1
-Jul 29 16:01:55.141: INFO: Creating a pod which consumes cpu=661m on Node wetuj3nuajog-2
-Jul 29 16:01:55.153: INFO: Creating a pod which consumes cpu=1050m on Node wetuj3nuajog-3
-Jul 29 16:01:55.168: INFO: Waiting up to 5m0s for pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152" in namespace "sched-pred-8412" to be "running"
-Jul 29 16:01:55.182: INFO: Pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152": Phase="Pending", Reason="", readiness=false. Elapsed: 13.372353ms
-Jul 29 16:01:57.193: INFO: Pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024226484s
-Jul 29 16:01:59.195: INFO: Pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152": Phase="Running", Reason="", readiness=true. Elapsed: 4.026195073s
-Jul 29 16:01:59.195: INFO: Pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152" satisfied condition "running"
-Jul 29 16:01:59.195: INFO: Waiting up to 5m0s for pod "filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba" in namespace "sched-pred-8412" to be "running"
-Jul 29 16:01:59.202: INFO: Pod "filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba": Phase="Running", Reason="", readiness=true. Elapsed: 6.668031ms
-Jul 29 16:01:59.202: INFO: Pod "filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba" satisfied condition "running"
-Jul 29 16:01:59.202: INFO: Waiting up to 5m0s for pod "filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e" in namespace "sched-pred-8412" to be "running"
-Jul 29 16:01:59.209: INFO: Pod "filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e": Phase="Running", Reason="", readiness=true. Elapsed: 6.97788ms
-Jul 29 16:01:59.209: INFO: Pod "filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e" satisfied condition "running"
-STEP: Creating another pod that requires unavailable amount of CPU. 07/29/23 16:01:59.209
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152.1776620c4ff1cfd9], Reason = [Scheduled], Message = [Successfully assigned sched-pred-8412/filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152 to wetuj3nuajog-1] 07/29/23 16:01:59.223
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152.1776620c8ed0a46f], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 07/29/23 16:01:59.224
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152.1776620c9bd9e780], Reason = [Created], Message = [Created container filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152] 07/29/23 16:01:59.224
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152.1776620c9ee9f487], Reason = [Started], Message = [Started container filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152] 07/29/23 16:01:59.224
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba.1776620c5351e5df], Reason = [Scheduled], Message = [Successfully assigned sched-pred-8412/filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba to wetuj3nuajog-2] 07/29/23 16:01:59.224
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba.1776620c8d1cb910], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 07/29/23 16:01:59.224
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba.1776620c98a68fb3], Reason = [Created], Message = [Created container filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba] 07/29/23 16:01:59.225
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba.1776620c9b8fb295], Reason = [Started], Message = [Started container filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba] 07/29/23 16:01:59.225
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e.1776620c5349837b], Reason = [Scheduled], Message = [Successfully assigned sched-pred-8412/filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e to wetuj3nuajog-3] 07/29/23 16:01:59.225
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e.1776620c94f389b9], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 07/29/23 16:01:59.225
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e.1776620c9d58ba51], Reason = [Created], Message = [Created container filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e] 07/29/23 16:01:59.226
-STEP: Considering event:
-Type = [Normal], Name = [filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e.1776620c9ea7b775], Reason = [Started], Message = [Started container filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e] 07/29/23 16:01:59.226
-STEP: Considering event:
-Type = [Warning], Name = [additional-pod.1776620d43eda2da], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 Insufficient cpu. preemption: 0/3 nodes are available: 3 No preemption victims found for incoming pod..] 07/29/23 16:01:59.251
-STEP: removing the label node off the node wetuj3nuajog-1 07/29/23 16:02:00.26
-STEP: verifying the node doesn't have the label node 07/29/23 16:02:00.29
-STEP: removing the label node off the node wetuj3nuajog-2 07/29/23 16:02:00.302
-STEP: verifying the node doesn't have the label node 07/29/23 16:02:00.334
-STEP: removing the label node off the node wetuj3nuajog-3 07/29/23 16:02:00.342
-STEP: verifying the node doesn't have the label node 07/29/23 16:02:00.375
-[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+[BeforeEach] [sig-node] Pods
+ test/e2e/common/node/pods.go:194
+[It] should support remote command execution over websockets [NodeConformance] [Conformance]
+ test/e2e/common/node/pods.go:536
+Aug 24 12:05:37.795: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: creating the pod 08/24/23 12:05:37.798
+STEP: submitting the pod to kubernetes 08/24/23 12:05:37.799
+Aug 24 12:05:37.820: INFO: Waiting up to 5m0s for pod "pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a" in namespace "pods-238" to be "running and ready"
+Aug 24 12:05:37.831: INFO: Pod "pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a": Phase="Pending", Reason="", readiness=false. Elapsed: 10.852251ms
+Aug 24 12:05:37.831: INFO: The phase of Pod pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a is Pending, waiting for it to be Running (with Ready = true)
+Aug 24 12:05:39.839: INFO: Pod "pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a": Phase="Running", Reason="", readiness=true. Elapsed: 2.019202831s
+Aug 24 12:05:39.839: INFO: The phase of Pod pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a is Running (Ready = true)
+Aug 24 12:05:39.839: INFO: Pod "pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a" satisfied condition "running and ready"
+[AfterEach] [sig-node] Pods
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:02:00.392: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
- test/e2e/scheduling/predicates.go:88
-[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+Aug 24 12:05:39.996: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-node] Pods
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+[DeferCleanup (Each)] [sig-node] Pods
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+[DeferCleanup (Each)] [sig-node] Pods
 tear down framework | framework.go:193
-STEP: Destroying namespace "sched-pred-8412" for this suite. 07/29/23 16:02:00.412
+STEP: Destroying namespace "pods-238" for this suite. 08/24/23 12:05:40.007
------------------------------
-• [SLOW TEST] [5.879 seconds]
-[sig-scheduling] SchedulerPredicates [Serial]
-test/e2e/scheduling/framework.go:40
- validates resource limits of pods that are allowed to run [Conformance]
- test/e2e/scheduling/predicates.go:331
+• [2.274 seconds]
+[sig-node] Pods
+test/e2e/common/node/framework.go:23
+ should support remote command execution over websockets [NodeConformance] [Conformance]
+ test/e2e/common/node/pods.go:536
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+ [BeforeEach] [sig-node] Pods
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:01:54.557
- Jul 29 16:01:54.557: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename sched-pred 07/29/23 16:01:54.561
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:01:54.596
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:01:54.603
- [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+ STEP: Creating a kubernetes client 08/24/23 12:05:37.75
+ Aug 24 12:05:37.750: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename pods 08/24/23 12:05:37.752
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:37.783
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:37.789
+ [BeforeEach] [sig-node] Pods
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
- test/e2e/scheduling/predicates.go:97
- Jul 29 16:01:54.608: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
- Jul 29 16:01:54.633: INFO: Waiting for terminating namespaces to be deleted...
- Jul 29 16:01:54.643: INFO:
- Logging pods the apiserver thinks is on node wetuj3nuajog-1 before test
- Jul 29 16:01:54.663: INFO: cilium-cdv47 from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.663: INFO: Container cilium-agent ready: true, restart count 0
- Jul 29 16:01:54.664: INFO: cilium-node-init-jdrzm from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.664: INFO: Container node-init ready: true, restart count 0
- Jul 29 16:01:54.664: INFO: coredns-787d4945fb-2xpvx from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.664: INFO: Container coredns ready: true, restart count 0
- Jul 29 16:01:54.664: INFO: coredns-787d4945fb-clg7z from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.664: INFO: Container coredns ready: true, restart count 0
- Jul 29 16:01:54.664: INFO: kube-addon-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.664: INFO: Container kube-addon-manager ready: true, restart count 0
- Jul 29 16:01:54.664: INFO: kube-apiserver-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.664: INFO: Container kube-apiserver ready: true, restart count 0
- Jul 29 16:01:54.665: INFO: kube-controller-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.665: INFO: Container kube-controller-manager ready: true, restart count 0
- Jul 29 16:01:54.665: INFO: kube-proxy-zc9m8 from kube-system started at 2023-07-29 15:13:58 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.665: INFO: Container kube-proxy ready: true, restart count 0
- Jul 29 16:01:54.665: INFO: kube-scheduler-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.665: INFO: Container kube-scheduler ready: true, restart count 0
- Jul 29 16:01:54.665: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
- Jul 29 16:01:54.665: INFO: Container sonobuoy-worker ready: true, restart count 0
- Jul 29 16:01:54.665: INFO: Container systemd-logs ready: true, restart count 0
- Jul 29 16:01:54.665: INFO:
- Logging pods the apiserver thinks is on node wetuj3nuajog-2 before test
- Jul 29 16:01:54.683: INFO: cilium-kxphw from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.684: INFO: Container cilium-agent ready: true, restart count 0
- Jul 29 16:01:54.684: INFO: cilium-node-init-fqx5t from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.684: INFO: Container node-init ready: true, restart count 0
- Jul 29 16:01:54.684: INFO: cilium-operator-8c499d9f6-hfgjd from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.684: INFO: Container cilium-operator ready: true, restart count 0
- Jul 29 16:01:54.684: INFO: kube-addon-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.684: INFO: Container kube-addon-manager ready: true, restart count 0
- Jul 29 16:01:54.684: INFO: kube-apiserver-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.684: INFO: Container kube-apiserver ready: true, restart count 0
- Jul 29 16:01:54.684: INFO: kube-controller-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.684: INFO: Container kube-controller-manager ready: true, restart count 0
- Jul 29 16:01:54.685: INFO: kube-proxy-gzqkk from kube-system started at 2023-07-29 15:14:12 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.685: INFO: Container kube-proxy ready: true, restart count 0
- Jul 29 16:01:54.685: INFO: kube-scheduler-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.685: INFO: Container kube-scheduler ready: true, restart count 0
- Jul 29 16:01:54.685: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
- Jul 29 16:01:54.685: INFO: Container sonobuoy-worker ready: true, restart count 0
- Jul 29 16:01:54.685: INFO: Container systemd-logs ready: true, restart count 0
- Jul 29 16:01:54.685: INFO:
- Logging pods the apiserver thinks is on node wetuj3nuajog-3 before test
- Jul 29 16:01:54.704: INFO: cilium-node-init-9ghzk from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.704: INFO: Container node-init ready: true, restart count 0
- Jul 29 16:01:54.704: INFO: cilium-v9c5p from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.704: INFO: Container cilium-agent ready: true, restart count 0
- Jul 29 16:01:54.704: INFO: kube-proxy-v77tx from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.704: INFO: Container kube-proxy ready: true, restart count 0
- Jul 29 16:01:54.704: INFO: netserver-2 from pod-network-test-1723 started at 2023-07-29 16:01:33 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.704: INFO: Container webserver ready: true, restart count 0
- Jul 29 16:01:54.705: INFO: test-container-pod from pod-network-test-1723 started at 2023-07-29 16:01:45 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.705: INFO: Container webserver ready: true, restart count 0
- Jul 29 16:01:54.705: INFO: sonobuoy from sonobuoy started at 2023-07-29 15:28:59 +0000 UTC (1 container statuses recorded)
- Jul 29 16:01:54.705: INFO: Container kube-sonobuoy ready: true, restart count 0
- Jul 29 16:01:54.705: INFO: sonobuoy-e2e-job-7bf00df102b6496e from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
- Jul 29 16:01:54.705: INFO: Container e2e ready: true, restart count 0
- Jul 29 16:01:54.705: INFO: Container sonobuoy-worker ready: true, restart count 0
- Jul 29 16:01:54.705: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
- Jul 29 16:01:54.705: INFO: Container sonobuoy-worker ready: true, restart count 0
- Jul 29 16:01:54.705: INFO: Container systemd-logs ready: true, restart count 0
- [It] validates resource limits of pods that are allowed to run [Conformance]
- test/e2e/scheduling/predicates.go:331
- STEP: verifying the node has the label node wetuj3nuajog-1 07/29/23 16:01:54.759
- STEP: verifying the node has the label node wetuj3nuajog-2 07/29/23 16:01:54.807
- STEP: verifying the node has the label node wetuj3nuajog-3 07/29/23 16:01:54.958
- Jul 29 16:01:55.101: INFO: Pod cilium-cdv47 requesting resource cpu=0m on Node wetuj3nuajog-1
- Jul 29 16:01:55.101: INFO: Pod cilium-kxphw requesting resource cpu=0m on Node wetuj3nuajog-2
- Jul 29 16:01:55.101: INFO: Pod cilium-node-init-9ghzk requesting resource cpu=100m on Node wetuj3nuajog-3
- Jul 29 16:01:55.101: INFO: Pod cilium-node-init-fqx5t requesting resource cpu=100m on Node wetuj3nuajog-2
- Jul 29 16:01:55.101: INFO: Pod cilium-node-init-jdrzm requesting resource cpu=100m on Node wetuj3nuajog-1
- Jul 29 16:01:55.107: INFO: Pod cilium-operator-8c499d9f6-hfgjd requesting resource cpu=0m on Node wetuj3nuajog-2
- Jul 29 16:01:55.107: INFO: Pod cilium-v9c5p requesting resource cpu=0m on Node wetuj3nuajog-3
- Jul 29 16:01:55.107: INFO: Pod coredns-787d4945fb-2xpvx requesting resource cpu=100m on Node wetuj3nuajog-1
- Jul 29 16:01:55.108: INFO: Pod coredns-787d4945fb-clg7z requesting resource cpu=100m on Node wetuj3nuajog-1
- Jul 29 16:01:55.108: INFO: Pod kube-addon-manager-wetuj3nuajog-1 requesting resource cpu=5m on Node wetuj3nuajog-1
- Jul 29 16:01:55.108: INFO: Pod kube-addon-manager-wetuj3nuajog-2 requesting resource cpu=5m on Node wetuj3nuajog-2
- Jul 29 16:01:55.108: INFO: Pod kube-apiserver-wetuj3nuajog-1 requesting resource cpu=250m on Node wetuj3nuajog-1
- Jul 29 16:01:55.108: INFO: Pod kube-apiserver-wetuj3nuajog-2 requesting resource cpu=250m on Node wetuj3nuajog-2
- Jul 29 16:01:55.108: INFO: Pod kube-controller-manager-wetuj3nuajog-1 requesting resource cpu=200m on Node wetuj3nuajog-1
- Jul 29 16:01:55.108: INFO: Pod kube-controller-manager-wetuj3nuajog-2 requesting resource cpu=200m on Node wetuj3nuajog-2
- Jul 29 16:01:55.108: INFO: Pod kube-proxy-gzqkk requesting resource cpu=0m on Node wetuj3nuajog-2
- Jul 29 16:01:55.108: INFO: Pod kube-proxy-v77tx requesting resource cpu=0m on Node wetuj3nuajog-3
- Jul 29 16:01:55.108: INFO: Pod kube-proxy-zc9m8 requesting resource cpu=0m on Node wetuj3nuajog-1
- Jul 29 16:01:55.109: INFO: Pod kube-scheduler-wetuj3nuajog-1 requesting resource cpu=100m on Node wetuj3nuajog-1
- Jul 29 16:01:55.109: INFO: Pod kube-scheduler-wetuj3nuajog-2 requesting resource cpu=100m on Node wetuj3nuajog-2
- Jul 29 16:01:55.109: INFO: Pod sonobuoy requesting resource cpu=0m on Node wetuj3nuajog-3
- Jul 29 16:01:55.109: INFO: Pod sonobuoy-e2e-job-7bf00df102b6496e requesting resource cpu=0m on Node wetuj3nuajog-3
- Jul 29 16:01:55.109: INFO: Pod sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r requesting resource cpu=0m on Node wetuj3nuajog-1
- Jul 29 16:01:55.109: INFO: Pod sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 requesting resource cpu=0m on Node wetuj3nuajog-3
- Jul 29 16:01:55.109: INFO: Pod sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 requesting resource cpu=0m on Node wetuj3nuajog-2
- STEP: Starting Pods to consume most of the cluster CPU. 07/29/23 16:01:55.109
- Jul 29 16:01:55.109: INFO: Creating a pod which consumes cpu=521m on Node wetuj3nuajog-1
- Jul 29 16:01:55.141: INFO: Creating a pod which consumes cpu=661m on Node wetuj3nuajog-2
- Jul 29 16:01:55.153: INFO: Creating a pod which consumes cpu=1050m on Node wetuj3nuajog-3
- Jul 29 16:01:55.168: INFO: Waiting up to 5m0s for pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152" in namespace "sched-pred-8412" to be "running"
- Jul 29 16:01:55.182: INFO: Pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152": Phase="Pending", Reason="", readiness=false. Elapsed: 13.372353ms
- Jul 29 16:01:57.193: INFO: Pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024226484s
- Jul 29 16:01:59.195: INFO: Pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152": Phase="Running", Reason="", readiness=true. Elapsed: 4.026195073s
- Jul 29 16:01:59.195: INFO: Pod "filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152" satisfied condition "running"
- Jul 29 16:01:59.195: INFO: Waiting up to 5m0s for pod "filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba" in namespace "sched-pred-8412" to be "running"
- Jul 29 16:01:59.202: INFO: Pod "filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba": Phase="Running", Reason="", readiness=true. Elapsed: 6.668031ms
- Jul 29 16:01:59.202: INFO: Pod "filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba" satisfied condition "running"
- Jul 29 16:01:59.202: INFO: Waiting up to 5m0s for pod "filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e" in namespace "sched-pred-8412" to be "running"
- Jul 29 16:01:59.209: INFO: Pod "filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e": Phase="Running", Reason="", readiness=true. Elapsed: 6.97788ms
- Jul 29 16:01:59.209: INFO: Pod "filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e" satisfied condition "running"
- STEP: Creating another pod that requires unavailable amount of CPU. 07/29/23 16:01:59.209
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152.1776620c4ff1cfd9], Reason = [Scheduled], Message = [Successfully assigned sched-pred-8412/filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152 to wetuj3nuajog-1] 07/29/23 16:01:59.223
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152.1776620c8ed0a46f], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 07/29/23 16:01:59.224
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152.1776620c9bd9e780], Reason = [Created], Message = [Created container filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152] 07/29/23 16:01:59.224
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152.1776620c9ee9f487], Reason = [Started], Message = [Started container filler-pod-1e2dbce6-b8b4-40ba-a723-9af59cfc8152] 07/29/23 16:01:59.224
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba.1776620c5351e5df], Reason = [Scheduled], Message = [Successfully assigned sched-pred-8412/filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba to wetuj3nuajog-2] 07/29/23 16:01:59.224
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba.1776620c8d1cb910], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 07/29/23 16:01:59.224
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba.1776620c98a68fb3], Reason = [Created], Message = [Created container filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba] 07/29/23 16:01:59.225
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba.1776620c9b8fb295], Reason = [Started], Message = [Started container filler-pod-3e39ad7c-a608-476d-b2aa-9e39a97053ba] 07/29/23 16:01:59.225
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e.1776620c5349837b], Reason = [Scheduled], Message = [Successfully assigned sched-pred-8412/filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e to wetuj3nuajog-3] 07/29/23 16:01:59.225
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e.1776620c94f389b9], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 07/29/23 16:01:59.225
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e.1776620c9d58ba51], Reason = [Created], Message = [Created container filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e] 07/29/23 16:01:59.226
- STEP: Considering event:
- Type = [Normal], Name = [filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e.1776620c9ea7b775], Reason = [Started], Message = [Started container filler-pod-b89042ac-3ccb-4542-8131-b2826a488f7e] 07/29/23 16:01:59.226
- STEP: Considering event:
- Type = [Warning], Name = [additional-pod.1776620d43eda2da], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 Insufficient cpu. preemption: 0/3 nodes are available: 3 No preemption victims found for incoming pod..] 07/29/23 16:01:59.251
- STEP: removing the label node off the node wetuj3nuajog-1 07/29/23 16:02:00.26
- STEP: verifying the node doesn't have the label node 07/29/23 16:02:00.29
- STEP: removing the label node off the node wetuj3nuajog-2 07/29/23 16:02:00.302
- STEP: verifying the node doesn't have the label node 07/29/23 16:02:00.334
- STEP: removing the label node off the node wetuj3nuajog-3 07/29/23 16:02:00.342
- STEP: verifying the node doesn't have the label node 07/29/23 16:02:00.375
- [AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+ [BeforeEach] [sig-node] Pods
+ test/e2e/common/node/pods.go:194
+ [It] should support remote command execution over websockets [NodeConformance] [Conformance]
+ test/e2e/common/node/pods.go:536
+ Aug 24 12:05:37.795: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: creating the pod 08/24/23 12:05:37.798
+ STEP: submitting the pod to kubernetes 08/24/23 12:05:37.799
+ Aug 24 12:05:37.820: INFO: Waiting up to 5m0s for pod "pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a" in namespace "pods-238" to be "running and ready"
+ Aug 24 12:05:37.831: INFO: Pod "pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a": Phase="Pending", Reason="", readiness=false. Elapsed: 10.852251ms
+ Aug 24 12:05:37.831: INFO: The phase of Pod pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a is Pending, waiting for it to be Running (with Ready = true)
+ Aug 24 12:05:39.839: INFO: Pod "pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a": Phase="Running", Reason="", readiness=true. Elapsed: 2.019202831s
+ Aug 24 12:05:39.839: INFO: The phase of Pod pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a is Running (Ready = true)
+ Aug 24 12:05:39.839: INFO: Pod "pod-exec-websocket-6ae657dc-92eb-4e3d-b650-d679b9c16a2a" satisfied condition "running and ready"
+ [AfterEach] [sig-node] Pods
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:02:00.392: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
- test/e2e/scheduling/predicates.go:88
- [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+ Aug 24 12:05:39.996: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-node] Pods
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+ [DeferCleanup (Each)] [sig-node] Pods
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+ [DeferCleanup (Each)] [sig-node] Pods
 tear down framework | framework.go:193
- STEP: Destroying namespace "sched-pred-8412" for this suite. 07/29/23 16:02:00.412
+ STEP: Destroying namespace "pods-238" for this suite. 08/24/23 12:05:40.007
 << End Captured GinkgoWriter Output
------------------------------
-SS
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-apps] ControllerRevision [Serial]
- should manage the lifecycle of a ControllerRevision [Conformance]
- test/e2e/apps/controller_revision.go:124
-[BeforeEach] [sig-apps] ControllerRevision [Serial]
+[sig-storage] Downward API volume
+ should provide container's memory limit [NodeConformance] [Conformance]
+ test/e2e/common/storage/downwardapi_volume.go:207
+[BeforeEach] [sig-storage] Downward API volume
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:02:00.451
-Jul 29 16:02:00.451: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename controllerrevisions 07/29/23 16:02:00.456
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:00.519
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:00.525
-[BeforeEach] [sig-apps] ControllerRevision [Serial]
+STEP: Creating a kubernetes client 08/24/23 12:05:40.033
+Aug 24 12:05:40.033: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename downward-api 08/24/23 12:05:40.035
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:40.071
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:40.076
+[BeforeEach] [sig-storage] Downward API volume
 test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-apps] ControllerRevision [Serial]
- test/e2e/apps/controller_revision.go:93
-[It] should manage the lifecycle of a ControllerRevision [Conformance]
- test/e2e/apps/controller_revision.go:124
-STEP: Creating DaemonSet "e2e-5ssbr-daemon-set" 07/29/23 16:02:00.581
-STEP: Check that daemon pods launch on every node of the cluster. 07/29/23 16:02:00.594
-Jul 29 16:02:00.619: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 0
-Jul 29 16:02:00.619: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
-Jul 29 16:02:01.644: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 0
-Jul 29 16:02:01.644: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
-Jul 29 16:02:02.636: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 1
-Jul 29 16:02:02.636: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
-Jul 29 16:02:03.647: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 2
-Jul 29 16:02:03.647: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
-Jul 29 16:02:04.639: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 3
-Jul 29 16:02:04.640: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset e2e-5ssbr-daemon-set
-STEP: Confirm DaemonSet "e2e-5ssbr-daemon-set" successfully created with "daemonset-name=e2e-5ssbr-daemon-set" label 07/29/23 16:02:04.646
-STEP: Listing all ControllerRevisions with label "daemonset-name=e2e-5ssbr-daemon-set" 07/29/23 16:02:04.664
-Jul 29 16:02:04.672: INFO: Located ControllerRevision: "e2e-5ssbr-daemon-set-646f95b4c4"
-STEP: Patching ControllerRevision "e2e-5ssbr-daemon-set-646f95b4c4" 07/29/23 16:02:04.679
-Jul 29 16:02:04.691: INFO: e2e-5ssbr-daemon-set-646f95b4c4 has been patched
-STEP: Create a new ControllerRevision 07/29/23 16:02:04.691
-Jul 29 16:02:04.703: INFO: Created ControllerRevision: e2e-5ssbr-daemon-set-5497b45896
-STEP: Confirm that there are two ControllerRevisions 07/29/23 16:02:04.703
-Jul 29 16:02:04.704: INFO: Requesting list of ControllerRevisions to confirm quantity
-Jul 29 16:02:04.713: INFO: Found 2 ControllerRevisions
-STEP: Deleting ControllerRevision "e2e-5ssbr-daemon-set-646f95b4c4" 07/29/23 16:02:04.713
-STEP: Confirm that there is only one ControllerRevision 07/29/23 16:02:04.729
-Jul 29 16:02:04.730: INFO: Requesting list of ControllerRevisions to confirm quantity
-Jul 29 16:02:04.737: INFO: Found 1 ControllerRevisions
-STEP: Updating ControllerRevision "e2e-5ssbr-daemon-set-5497b45896" 07/29/23 16:02:04.745
-Jul 29 16:02:04.764: INFO: e2e-5ssbr-daemon-set-5497b45896 has been updated
-STEP: Generate another ControllerRevision by patching the Daemonset 07/29/23 16:02:04.765
-W0729 16:02:04.785371 13 warnings.go:70] unknown field "updateStrategy"
-STEP: Confirm that there are two ControllerRevisions 07/29/23 16:02:04.785
-Jul 29 16:02:04.785: INFO: Requesting list of ControllerRevisions to confirm quantity
-Jul 29 16:02:05.802: INFO: Requesting list of ControllerRevisions to confirm quantity
-Jul 29 16:02:05.811: INFO: Found 2 ControllerRevisions
-STEP: Removing a ControllerRevision via 'DeleteCollection' with labelSelector: "e2e-5ssbr-daemon-set-5497b45896=updated" 07/29/23 16:02:05.811
-STEP: Confirm that there is only one ControllerRevision 07/29/23 16:02:05.829
-Jul 29 16:02:05.829: INFO: Requesting list of ControllerRevisions to confirm quantity
-Jul 29 16:02:05.836: INFO: Found 1 ControllerRevisions
-Jul 29 16:02:05.841: INFO: ControllerRevision "e2e-5ssbr-daemon-set-7b945bf4f" has revision 3
-[AfterEach] [sig-apps] ControllerRevision [Serial]
- test/e2e/apps/controller_revision.go:58
-STEP: Deleting DaemonSet "e2e-5ssbr-daemon-set" 07/29/23 16:02:05.847
-STEP: deleting DaemonSet.extensions e2e-5ssbr-daemon-set in namespace controllerrevisions-7720, will wait for the garbage collector to delete the pods 07/29/23 16:02:05.848
-Jul 29 16:02:05.922: INFO: Deleting DaemonSet.extensions e2e-5ssbr-daemon-set took: 16.080362ms
-Jul 29 16:02:06.023: INFO: Terminating DaemonSet.extensions e2e-5ssbr-daemon-set pods took: 100.634379ms
-Jul 29 16:02:07.530: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 0
-Jul 29 16:02:07.530: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset e2e-5ssbr-daemon-set
-Jul 29 16:02:07.542: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"14110"},"items":null}
-
-Jul 29 16:02:07.551: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"14110"},"items":null}
-
-[AfterEach] [sig-apps] ControllerRevision [Serial]
+[BeforeEach] [sig-storage] Downward API volume
+ test/e2e/common/storage/downwardapi_volume.go:44
+[It] should provide container's memory limit [NodeConformance] [Conformance]
+ test/e2e/common/storage/downwardapi_volume.go:207
+STEP: Creating a pod to test downward API volume plugin 08/24/23 12:05:40.082
+Aug 24 12:05:40.101: INFO: Waiting up to 5m0s for pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90" in namespace "downward-api-8606" to be "Succeeded or Failed"
+Aug 24 12:05:40.111: INFO: Pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90": Phase="Pending", Reason="", readiness=false. Elapsed: 9.554132ms
+Aug 24 12:05:42.122: INFO: Pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021036023s
+Aug 24 12:05:44.118: INFO: Pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017008637s
+STEP: Saw pod success 08/24/23 12:05:44.118
+Aug 24 12:05:44.119: INFO: Pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90" satisfied condition "Succeeded or Failed"
+Aug 24 12:05:44.126: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90 container client-container:
+STEP: delete the pod 08/24/23 12:05:44.138
+Aug 24 12:05:44.166: INFO: Waiting for pod downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90 to disappear
+Aug 24 12:05:44.171: INFO: Pod downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90 no longer exists
+[AfterEach] [sig-storage] Downward API volume
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:02:07.587: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial]
+Aug 24 12:05:44.172: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-storage] Downward API volume
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial]
+[DeferCleanup (Each)] [sig-storage] Downward API volume
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial]
+[DeferCleanup (Each)] [sig-storage] Downward API volume
 tear down framework | framework.go:193
-STEP: Destroying namespace "controllerrevisions-7720" for this suite. 07/29/23 16:02:07.603
+STEP: Destroying namespace "downward-api-8606" for this suite. 08/24/23 12:05:44.182
------------------------------
-• [SLOW TEST] [7.179 seconds]
-[sig-apps] ControllerRevision [Serial]
-test/e2e/apps/framework.go:23
- should manage the lifecycle of a ControllerRevision [Conformance]
- test/e2e/apps/controller_revision.go:124
+• [4.161 seconds]
+[sig-storage] Downward API volume
+test/e2e/common/storage/framework.go:23
+ should provide container's memory limit [NodeConformance] [Conformance]
+ test/e2e/common/storage/downwardapi_volume.go:207
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-apps] ControllerRevision [Serial]
+ [BeforeEach] [sig-storage] Downward API volume
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:02:00.451
- Jul 29 16:02:00.451: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename controllerrevisions 07/29/23 16:02:00.456
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:00.519
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:00.525
- [BeforeEach] [sig-apps] ControllerRevision [Serial]
+ STEP: Creating a kubernetes client 08/24/23 12:05:40.033
+ Aug 24 12:05:40.033: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename downward-api 08/24/23 12:05:40.035
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:40.071
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:40.076
+ [BeforeEach] [sig-storage] Downward API volume
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-apps] ControllerRevision [Serial]
- test/e2e/apps/controller_revision.go:93
- [It] should manage the lifecycle of a ControllerRevision [Conformance]
- test/e2e/apps/controller_revision.go:124
- STEP: Creating DaemonSet "e2e-5ssbr-daemon-set" 07/29/23 16:02:00.581
- STEP: Check that daemon pods launch on every node of the cluster. 07/29/23 16:02:00.594
- Jul 29 16:02:00.619: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 0
- Jul 29 16:02:00.619: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
- Jul 29 16:02:01.644: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 0
- Jul 29 16:02:01.644: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
- Jul 29 16:02:02.636: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 1
- Jul 29 16:02:02.636: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
- Jul 29 16:02:03.647: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 2
- Jul 29 16:02:03.647: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
- Jul 29 16:02:04.639: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 3
- Jul 29 16:02:04.640: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset e2e-5ssbr-daemon-set
- STEP: Confirm DaemonSet "e2e-5ssbr-daemon-set" successfully created with "daemonset-name=e2e-5ssbr-daemon-set" label 07/29/23 16:02:04.646
- STEP: Listing all ControllerRevisions with label "daemonset-name=e2e-5ssbr-daemon-set" 07/29/23 16:02:04.664
- Jul 29 16:02:04.672: INFO: Located ControllerRevision: "e2e-5ssbr-daemon-set-646f95b4c4"
- STEP: Patching ControllerRevision "e2e-5ssbr-daemon-set-646f95b4c4" 07/29/23 16:02:04.679
- Jul 29 16:02:04.691: INFO: e2e-5ssbr-daemon-set-646f95b4c4 has been patched
- STEP: Create a new ControllerRevision 07/29/23 16:02:04.691
- Jul 29 16:02:04.703: INFO: Created ControllerRevision: e2e-5ssbr-daemon-set-5497b45896
- STEP: Confirm that there are two ControllerRevisions 07/29/23 16:02:04.703
- Jul 29 16:02:04.704: INFO: Requesting list of ControllerRevisions to confirm quantity
- Jul 29 16:02:04.713: INFO: Found 2 ControllerRevisions
- STEP: Deleting ControllerRevision "e2e-5ssbr-daemon-set-646f95b4c4" 07/29/23 16:02:04.713
- STEP: Confirm that there is only one ControllerRevision 07/29/23 16:02:04.729
- Jul 29 16:02:04.730: INFO: Requesting list of ControllerRevisions to confirm quantity
- Jul 29 16:02:04.737: INFO: Found 1 ControllerRevisions
- STEP: Updating ControllerRevision "e2e-5ssbr-daemon-set-5497b45896" 07/29/23 16:02:04.745
- Jul 29 16:02:04.764: INFO: e2e-5ssbr-daemon-set-5497b45896 has been updated
- STEP: Generate another ControllerRevision by patching the Daemonset 07/29/23 16:02:04.765
- W0729 16:02:04.785371 13 warnings.go:70] unknown field "updateStrategy"
- STEP: Confirm that there are two ControllerRevisions 07/29/23 16:02:04.785
- Jul 29 16:02:04.785: INFO: Requesting list of ControllerRevisions to confirm quantity
- Jul 29 16:02:05.802: INFO: Requesting list of ControllerRevisions to confirm quantity
- Jul 29 16:02:05.811: INFO: Found 2 ControllerRevisions
- STEP: Removing a ControllerRevision via 'DeleteCollection' with labelSelector: "e2e-5ssbr-daemon-set-5497b45896=updated" 07/29/23 16:02:05.811
- STEP: Confirm that there is only one ControllerRevision 07/29/23 16:02:05.829
- Jul 29 16:02:05.829: INFO: Requesting list of ControllerRevisions to confirm quantity
- Jul 29 16:02:05.836: INFO: Found 1 ControllerRevisions
- Jul 29 16:02:05.841: INFO: ControllerRevision "e2e-5ssbr-daemon-set-7b945bf4f" has revision 3
- [AfterEach] [sig-apps] ControllerRevision [Serial]
- test/e2e/apps/controller_revision.go:58
- STEP: Deleting DaemonSet "e2e-5ssbr-daemon-set" 07/29/23 16:02:05.847
- STEP: deleting DaemonSet.extensions e2e-5ssbr-daemon-set in namespace controllerrevisions-7720, will wait for the garbage collector to delete the pods 07/29/23 16:02:05.848
- Jul 29 16:02:05.922: INFO: Deleting DaemonSet.extensions e2e-5ssbr-daemon-set took: 16.080362ms
- Jul 29 16:02:06.023: INFO: Terminating DaemonSet.extensions e2e-5ssbr-daemon-set pods took: 100.634379ms
- Jul 29 16:02:07.530: INFO: Number of nodes with available pods controlled by daemonset e2e-5ssbr-daemon-set: 0
- Jul 29 16:02:07.530: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset e2e-5ssbr-daemon-set
- Jul 29 16:02:07.542: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"14110"},"items":null}
-
- Jul 29 16:02:07.551: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"14110"},"items":null}
-
- [AfterEach] [sig-apps] ControllerRevision [Serial]
+ [BeforeEach] [sig-storage] Downward API volume
+ test/e2e/common/storage/downwardapi_volume.go:44
+ [It] should provide container's memory limit [NodeConformance] [Conformance]
+ test/e2e/common/storage/downwardapi_volume.go:207
+ STEP: Creating a pod to test downward API volume plugin 08/24/23 12:05:40.082
+ Aug 24 12:05:40.101: INFO: Waiting up to 5m0s for pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90" in namespace "downward-api-8606" to be "Succeeded or Failed"
+ Aug 24 12:05:40.111: INFO: Pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90": Phase="Pending", Reason="", readiness=false. Elapsed: 9.554132ms
+ Aug 24 12:05:42.122: INFO: Pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021036023s
+ Aug 24 12:05:44.118: INFO: Pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017008637s
+ STEP: Saw pod success 08/24/23 12:05:44.118
+ Aug 24 12:05:44.119: INFO: Pod "downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90" satisfied condition "Succeeded or Failed"
+ Aug 24 12:05:44.126: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90 container client-container:
+ STEP: delete the pod 08/24/23 12:05:44.138
+ Aug 24 12:05:44.166: INFO: Waiting for pod downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90 to disappear
+ Aug 24 12:05:44.171: INFO: Pod downwardapi-volume-630de7db-62f0-4385-be1b-1b3e81659c90 no longer exists
+ [AfterEach] [sig-storage] Downward API volume
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:02:07.587: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial]
+ Aug 24 12:05:44.172: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] Downward API volume
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial]
+ [DeferCleanup (Each)] [sig-storage] Downward API volume
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-apps] ControllerRevision [Serial]
+ [DeferCleanup (Each)] [sig-storage] Downward API volume
 tear down framework | framework.go:193
- STEP: Destroying namespace "controllerrevisions-7720" for this suite. 07/29/23 16:02:07.603
+ STEP: Destroying namespace "downward-api-8606" for this suite. 08/24/23 12:05:44.182
 << End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
-------------------------------
-[sig-storage] Secrets
- should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:79
-[BeforeEach] [sig-storage] Secrets
+[sig-node] Container Runtime blackbox test on terminated container
+ should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+ test/e2e/common/node/runtime.go:232
+[BeforeEach] [sig-node] Container Runtime
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:02:07.653
-Jul 29 16:02:07.653: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename secrets 07/29/23 16:02:07.658
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:07.682
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:07.688
-[BeforeEach] [sig-storage] Secrets
+STEP: Creating a kubernetes client 08/24/23 12:05:44.196
+Aug 24 12:05:44.196: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename container-runtime 08/24/23 12:05:44.199
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:44.225
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:44.231
+[BeforeEach] [sig-node] Container Runtime
 test/e2e/framework/metrics/init/init.go:31
-[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:79
-STEP: Creating secret with name secret-test-map-853886b8-0fa8-432a-97c7-1e1aa9fb00f3 07/29/23 16:02:07.697
-STEP: Creating a pod to test consume secrets 07/29/23 16:02:07.707
-Jul 29 16:02:07.723: INFO: Waiting up to 5m0s for pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168" in namespace "secrets-5711" to be "Succeeded or Failed"
-Jul 29 16:02:07.729: INFO: Pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168": Phase="Pending", Reason="", readiness=false. Elapsed: 5.042882ms
-Jul 29 16:02:09.739: INFO: Pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015489014s
-Jul 29 16:02:11.734: INFO: Pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.010901718s
-STEP: Saw pod success 07/29/23 16:02:11.735
-Jul 29 16:02:11.735: INFO: Pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168" satisfied condition "Succeeded or Failed"
-Jul 29 16:02:11.754: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168 container secret-volume-test:
-STEP: delete the pod 07/29/23 16:02:11.795
-Jul 29 16:02:11.834: INFO: Waiting for pod pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168 to disappear
-Jul 29 16:02:11.840: INFO: Pod pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168 no longer exists
-[AfterEach] [sig-storage] Secrets
+[It] should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
+ test/e2e/common/node/runtime.go:232
+STEP: create the container 08/24/23 12:05:44.237
+STEP: wait for the container to reach Succeeded 08/24/23 12:05:44.253
+STEP: get the container status 08/24/23 12:05:47.281
+STEP: the container should be terminated 08/24/23 12:05:47.288
+STEP: the termination message should be set 08/24/23 12:05:47.288
+Aug 24 12:05:47.288: INFO: Expected: &{} to match Container's Termination Message: --
+STEP: delete the container 08/24/23 12:05:47.288
+[AfterEach] [sig-node] Container Runtime
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:02:11.840: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-storage] Secrets
+Aug 24 12:05:47.316: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-node] Container Runtime
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-storage] Secrets
+[DeferCleanup (Each)] [sig-node] Container Runtime
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-storage] Secrets
+[DeferCleanup (Each)] [sig-node] Container Runtime
 tear down framework | framework.go:193
-STEP: Destroying namespace "secrets-5711" for this suite. 07/29/23 16:02:11.855
+STEP: Destroying namespace "container-runtime-9501" for this suite.
08/24/23 12:05:47.325 ------------------------------ -• [4.218 seconds] -[sig-storage] Secrets -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:79 +• [3.140 seconds] +[sig-node] Container Runtime +test/e2e/common/node/framework.go:23 + blackbox test + test/e2e/common/node/runtime.go:44 + on terminated container + test/e2e/common/node/runtime.go:137 + should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:232 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Secrets + [BeforeEach] [sig-node] Container Runtime set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:02:07.653 - Jul 29 16:02:07.653: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 16:02:07.658 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:07.682 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:07.688 - [BeforeEach] [sig-storage] Secrets + STEP: Creating a kubernetes client 08/24/23 12:05:44.196 + Aug 24 12:05:44.196: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-runtime 08/24/23 12:05:44.199 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:44.225 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:44.231 + [BeforeEach] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:79 - STEP: Creating secret with name secret-test-map-853886b8-0fa8-432a-97c7-1e1aa9fb00f3 07/29/23 16:02:07.697 - STEP: Creating a pod to test consume secrets 07/29/23 16:02:07.707 - Jul 29 16:02:07.723: INFO: Waiting up to 5m0s for pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168" in namespace "secrets-5711" to be "Succeeded or Failed" - Jul 29 16:02:07.729: INFO: Pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168": Phase="Pending", Reason="", readiness=false. Elapsed: 5.042882ms - Jul 29 16:02:09.739: INFO: Pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015489014s - Jul 29 16:02:11.734: INFO: Pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.010901718s - STEP: Saw pod success 07/29/23 16:02:11.735 - Jul 29 16:02:11.735: INFO: Pod "pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168" satisfied condition "Succeeded or Failed" - Jul 29 16:02:11.754: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168 container secret-volume-test: - STEP: delete the pod 07/29/23 16:02:11.795 - Jul 29 16:02:11.834: INFO: Waiting for pod pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168 to disappear - Jul 29 16:02:11.840: INFO: Pod pod-secrets-d325ef0c-5065-4026-a66b-6a59347bc168 no longer exists - [AfterEach] [sig-storage] Secrets + [It] should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:232 + STEP: create the container 08/24/23 12:05:44.237 + STEP: wait for the container to reach Succeeded 08/24/23 12:05:44.253 + STEP: get the container status 08/24/23 12:05:47.281 + STEP: the container should be terminated 08/24/23 12:05:47.288 + STEP: the termination message should be set 08/24/23 12:05:47.288 + Aug 24 12:05:47.288: INFO: Expected: &{} to match Container's Termination Message: -- + STEP: delete the container 08/24/23 12:05:47.288 + [AfterEach] [sig-node] Container Runtime test/e2e/framework/node/init/init.go:32 - Jul 29 16:02:11.840: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Secrets + Aug 24 12:05:47.316: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-node] Container Runtime dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-node] Container Runtime tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-5711" for this suite. 07/29/23 16:02:11.855 + STEP: Destroying namespace "container-runtime-9501" for this suite. 
08/24/23 12:05:47.325 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] - test/e2e/network/service.go:2191 -[BeforeEach] [sig-network] Services +[sig-apps] ReplicaSet + should serve a basic image on each replica with a public image [Conformance] + test/e2e/apps/replica_set.go:111 +[BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:02:11.878 -Jul 29 16:02:11.878: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 16:02:11.881 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:11.952 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:11.957 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 12:05:47.352 +Aug 24 12:05:47.352: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replicaset 08/24/23 12:05:47.355 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:47.382 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:47.388 +[BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] - test/e2e/network/service.go:2191 -STEP: creating service in namespace services-1226 07/29/23 16:02:11.962 -STEP: creating service affinity-clusterip in namespace services-1226 07/29/23 16:02:11.962 -STEP: creating replication controller affinity-clusterip in namespace services-1226 07/29/23 16:02:11.986 -I0729 16:02:12.010206 13 runners.go:193] Created replication controller with name: affinity-clusterip, namespace: services-1226, replica count: 3 -I0729 16:02:15.061571 13 runners.go:193] affinity-clusterip Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Jul 29 16:02:15.076: INFO: Creating new exec pod -Jul 29 16:02:15.102: INFO: Waiting up to 5m0s for pod "execpod-affinitywrn26" in namespace "services-1226" to be "running" -Jul 29 16:02:15.116: INFO: Pod "execpod-affinitywrn26": Phase="Pending", Reason="", readiness=false. Elapsed: 14.382302ms -Jul 29 16:02:17.126: INFO: Pod "execpod-affinitywrn26": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.023571599s -Jul 29 16:02:17.126: INFO: Pod "execpod-affinitywrn26" satisfied condition "running" -Jul 29 16:02:18.130: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-1226 exec execpod-affinitywrn26 -- /bin/sh -x -c nc -v -z -w 2 affinity-clusterip 80' -Jul 29 16:02:18.457: INFO: stderr: "+ nc -v -z -w 2 affinity-clusterip 80\nConnection to affinity-clusterip 80 port [tcp/http] succeeded!\n" -Jul 29 16:02:18.458: INFO: stdout: "" -Jul 29 16:02:18.458: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-1226 exec execpod-affinitywrn26 -- /bin/sh -x -c nc -v -z -w 2 10.233.63.161 80' -Jul 29 16:02:18.718: INFO: stderr: "+ nc -v -z -w 2 10.233.63.161 80\nConnection to 10.233.63.161 80 port [tcp/http] succeeded!\n" -Jul 29 16:02:18.719: INFO: stdout: "" -Jul 29 16:02:18.719: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-1226 exec execpod-affinitywrn26 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.63.161:80/ ; done' -Jul 29 16:02:19.249: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n" -Jul 29 16:02:19.249: INFO: stdout: "\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv" -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: 
INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv -Jul 29 16:02:19.249: INFO: Cleaning up the exec pod -STEP: deleting ReplicationController affinity-clusterip in namespace services-1226, will wait for the garbage collector to delete the pods 07/29/23 16:02:19.274 -Jul 29 16:02:19.349: INFO: Deleting ReplicationController affinity-clusterip took: 15.996514ms -Jul 29 16:02:19.449: INFO: Terminating ReplicationController affinity-clusterip pods took: 100.20175ms -[AfterEach] [sig-network] Services +[It] should serve a basic image on each replica with a public image [Conformance] + test/e2e/apps/replica_set.go:111 +Aug 24 12:05:47.394: INFO: Creating ReplicaSet my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce +Aug 24 12:05:47.407: INFO: Pod name my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce: Found 0 pods out of 1 +Aug 24 12:05:52.418: INFO: Pod name my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce: Found 1 pods out of 1 +Aug 24 12:05:52.418: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce" is running +Aug 24 12:05:52.418: INFO: Waiting up to 5m0s for pod "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479" in namespace "replicaset-8202" to be "running" +Aug 24 12:05:52.425: INFO: Pod "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.00628ms +Aug 24 12:05:52.425: INFO: Pod "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479" satisfied condition "running" +Aug 24 12:05:52.425: INFO: Pod "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:05:47 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:05:48 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:05:48 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:05:47 +0000 UTC Reason: Message:}]) +Aug 24 12:05:52.425: INFO: Trying to dial the pod +Aug 24 12:05:57.450: INFO: Controller my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce: Got expected result from replica 1 [my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479]: "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicaSet test/e2e/framework/node/init/init.go:32 -Jul 29 16:02:21.679: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 12:05:57.450: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 -STEP: Destroying namespace "services-1226" for this suite. 07/29/23 16:02:21.689 +STEP: Destroying namespace "replicaset-8202" for this suite. 
08/24/23 12:05:57.461 ------------------------------ -• [SLOW TEST] [9.827 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] - test/e2e/network/service.go:2191 +• [SLOW TEST] [10.123 seconds] +[sig-apps] ReplicaSet +test/e2e/apps/framework.go:23 + should serve a basic image on each replica with a public image [Conformance] + test/e2e/apps/replica_set.go:111 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:02:11.878 - Jul 29 16:02:11.878: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 16:02:11.881 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:11.952 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:11.957 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 12:05:47.352 + Aug 24 12:05:47.352: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replicaset 08/24/23 12:05:47.355 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:47.382 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:47.388 + [BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] - test/e2e/network/service.go:2191 - STEP: creating service in namespace services-1226 07/29/23 16:02:11.962 - STEP: creating service affinity-clusterip in namespace services-1226 07/29/23 16:02:11.962 - STEP: creating replication controller affinity-clusterip in namespace services-1226 07/29/23 16:02:11.986 - I0729 16:02:12.010206 13 runners.go:193] Created replication controller with name: affinity-clusterip, namespace: services-1226, replica count: 3 - I0729 16:02:15.061571 13 runners.go:193] affinity-clusterip Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady - Jul 29 16:02:15.076: INFO: Creating new exec pod - Jul 29 16:02:15.102: INFO: Waiting up to 5m0s for pod "execpod-affinitywrn26" in namespace "services-1226" to be "running" - Jul 29 16:02:15.116: INFO: Pod "execpod-affinitywrn26": Phase="Pending", Reason="", readiness=false. Elapsed: 14.382302ms - Jul 29 16:02:17.126: INFO: Pod "execpod-affinitywrn26": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.023571599s - Jul 29 16:02:17.126: INFO: Pod "execpod-affinitywrn26" satisfied condition "running" - Jul 29 16:02:18.130: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-1226 exec execpod-affinitywrn26 -- /bin/sh -x -c nc -v -z -w 2 affinity-clusterip 80' - Jul 29 16:02:18.457: INFO: stderr: "+ nc -v -z -w 2 affinity-clusterip 80\nConnection to affinity-clusterip 80 port [tcp/http] succeeded!\n" - Jul 29 16:02:18.458: INFO: stdout: "" - Jul 29 16:02:18.458: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-1226 exec execpod-affinitywrn26 -- /bin/sh -x -c nc -v -z -w 2 10.233.63.161 80' - Jul 29 16:02:18.718: INFO: stderr: "+ nc -v -z -w 2 10.233.63.161 80\nConnection to 10.233.63.161 80 port [tcp/http] succeeded!\n" - Jul 29 16:02:18.719: INFO: stdout: "" - Jul 29 16:02:18.719: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-1226 exec execpod-affinitywrn26 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.63.161:80/ ; done' - Jul 29 16:02:19.249: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.63.161:80/\n" - Jul 29 16:02:19.249: INFO: stdout: "\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv\naffinity-clusterip-bv7lv" - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - 
Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Received response from host: affinity-clusterip-bv7lv - Jul 29 16:02:19.249: INFO: Cleaning up the exec pod - STEP: deleting ReplicationController affinity-clusterip in namespace services-1226, will wait for the garbage collector to delete the pods 07/29/23 16:02:19.274 - Jul 29 16:02:19.349: INFO: Deleting ReplicationController affinity-clusterip took: 15.996514ms - Jul 29 16:02:19.449: INFO: Terminating ReplicationController affinity-clusterip pods took: 100.20175ms - [AfterEach] [sig-network] Services + [It] should serve a basic image on each replica with a public image [Conformance] + test/e2e/apps/replica_set.go:111 + Aug 24 12:05:47.394: INFO: Creating ReplicaSet my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce + Aug 24 12:05:47.407: INFO: Pod name my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce: Found 0 pods out of 1 + Aug 24 12:05:52.418: INFO: Pod name my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce: Found 1 pods out of 1 + Aug 24 12:05:52.418: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce" is running + Aug 24 12:05:52.418: INFO: Waiting up to 5m0s for pod "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479" in namespace "replicaset-8202" to be "running" + Aug 24 12:05:52.425: INFO: Pod "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.00628ms + Aug 24 12:05:52.425: INFO: Pod "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479" satisfied condition "running" + Aug 24 12:05:52.425: INFO: Pod "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:05:47 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:05:48 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:05:48 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:05:47 +0000 UTC Reason: Message:}]) + Aug 24 12:05:52.425: INFO: Trying to dial the pod + Aug 24 12:05:57.450: INFO: Controller my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce: Got expected result from replica 1 [my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479]: "my-hostname-basic-a31f4a5b-9411-4160-9537-d3c30c7164ce-ls479", 1 of 1 required successes so far + [AfterEach] [sig-apps] ReplicaSet test/e2e/framework/node/init/init.go:32 - Jul 29 16:02:21.679: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 12:05:57.450: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 - STEP: Destroying namespace "services-1226" for this suite. 07/29/23 16:02:21.689 + STEP: Destroying namespace "replicaset-8202" for this suite. 
08/24/23 12:05:57.461 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SS ------------------------------ -[sig-api-machinery] Garbage collector - should delete RS created by deployment when not orphaning [Conformance] - test/e2e/apimachinery/garbage_collector.go:491 -[BeforeEach] [sig-api-machinery] Garbage collector +[sig-storage] Projected configMap + updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:124 +[BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:02:21.71 -Jul 29 16:02:21.710: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename gc 07/29/23 16:02:21.712 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:21.75 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:21.758 -[BeforeEach] [sig-api-machinery] Garbage collector +STEP: Creating a kubernetes client 08/24/23 12:05:57.48 +Aug 24 12:05:57.480: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:05:57.483 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:57.52 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:57.523 +[BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 -[It] should delete RS created by deployment when not orphaning [Conformance] - test/e2e/apimachinery/garbage_collector.go:491 -STEP: create the deployment 07/29/23 16:02:21.765 -STEP: Wait for the Deployment to create new ReplicaSet 07/29/23 16:02:21.777 -STEP: delete the deployment 07/29/23 16:02:22.301 -STEP: wait for all rs to be garbage collected 07/29/23 16:02:22.332 -STEP: expected 0 rs, got 1 rs 07/29/23 16:02:22.345 -STEP: expected 0 pods, got 2 pods 07/29/23 16:02:22.354 -STEP: Gathering metrics 07/29/23 16:02:22.879 -Jul 29 16:02:22.996: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" -Jul 29 16:02:23.016: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 19.856118ms -Jul 29 16:02:23.016: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) -Jul 29 16:02:23.016: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" -Jul 29 16:02:23.170: INFO: For apiserver_request_total: -For apiserver_request_latency_seconds: -For apiserver_init_events_total: -For garbage_collector_attempt_to_delete_queue_latency: -For garbage_collector_attempt_to_delete_work_duration: -For garbage_collector_attempt_to_orphan_queue_latency: -For garbage_collector_attempt_to_orphan_work_duration: -For garbage_collector_dirty_processing_latency_microseconds: -For garbage_collector_event_processing_latency_microseconds: -For garbage_collector_graph_changes_queue_latency: -For garbage_collector_graph_changes_work_duration: -For garbage_collector_orphan_processing_latency_microseconds: -For namespace_queue_latency: -For namespace_queue_latency_sum: -For namespace_queue_latency_count: -For namespace_retries: -For namespace_work_duration: -For namespace_work_duration_sum: -For namespace_work_duration_count: -For function_duration_seconds: -For errors_total: -For evicted_pods_total: - -[AfterEach] [sig-api-machinery] Garbage collector +[It] updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:124 +STEP: Creating projection with configMap that has name projected-configmap-test-upd-8b7567d7-2a9f-4dad-9e60-ffe4121282c8 08/24/23 12:05:57.534 +STEP: Creating the pod 08/24/23 12:05:57.54 +Aug 24 12:05:57.554: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b" in namespace "projected-5905" to be "running and ready" +Aug 24 12:05:57.560: INFO: Pod "pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b": Phase="Pending", Reason="", readiness=false. Elapsed: 6.274666ms +Aug 24 12:05:57.560: INFO: The phase of Pod pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:05:59.569: INFO: Pod "pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b": Phase="Running", Reason="", readiness=true. Elapsed: 2.015087636s +Aug 24 12:05:59.569: INFO: The phase of Pod pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b is Running (Ready = true) +Aug 24 12:05:59.569: INFO: Pod "pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b" satisfied condition "running and ready" +STEP: Updating configmap projected-configmap-test-upd-8b7567d7-2a9f-4dad-9e60-ffe4121282c8 08/24/23 12:05:59.585 +STEP: waiting to observe update in volume 08/24/23 12:05:59.594 +[AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:02:23.170: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +Aug 24 12:06:01.619: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 -STEP: Destroying namespace "gc-1570" for this suite. 07/29/23 16:02:23.18 +STEP: Destroying namespace "projected-5905" for this suite. 
08/24/23 12:06:01.628 ------------------------------ -• [1.485 seconds] -[sig-api-machinery] Garbage collector -test/e2e/apimachinery/framework.go:23 - should delete RS created by deployment when not orphaning [Conformance] - test/e2e/apimachinery/garbage_collector.go:491 +• [4.159 seconds] +[sig-storage] Projected configMap +test/e2e/common/storage/framework.go:23 + updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:124 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Garbage collector + [BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:02:21.71 - Jul 29 16:02:21.710: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename gc 07/29/23 16:02:21.712 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:21.75 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:21.758 - [BeforeEach] [sig-api-machinery] Garbage collector + STEP: Creating a kubernetes client 08/24/23 12:05:57.48 + Aug 24 12:05:57.480: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:05:57.483 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:05:57.52 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:05:57.523 + [BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 - [It] should delete RS created by deployment when not orphaning [Conformance] - test/e2e/apimachinery/garbage_collector.go:491 - STEP: create the deployment 07/29/23 16:02:21.765 - STEP: Wait for the Deployment to create new ReplicaSet 07/29/23 16:02:21.777 - STEP: delete the deployment 07/29/23 16:02:22.301 - STEP: wait for all rs to be garbage collected 07/29/23 16:02:22.332 - STEP: expected 0 rs, got 1 rs 07/29/23 16:02:22.345 - STEP: expected 0 pods, got 2 pods 07/29/23 16:02:22.354 - STEP: Gathering metrics 07/29/23 16:02:22.879 - Jul 29 16:02:22.996: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" - Jul 29 16:02:23.016: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 19.856118ms - Jul 29 16:02:23.016: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) - Jul 29 16:02:23.016: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" - Jul 29 16:02:23.170: INFO: For apiserver_request_total: - For apiserver_request_latency_seconds: - For apiserver_init_events_total: - For garbage_collector_attempt_to_delete_queue_latency: - For garbage_collector_attempt_to_delete_work_duration: - For garbage_collector_attempt_to_orphan_queue_latency: - For garbage_collector_attempt_to_orphan_work_duration: - For garbage_collector_dirty_processing_latency_microseconds: - For garbage_collector_event_processing_latency_microseconds: - For garbage_collector_graph_changes_queue_latency: - For garbage_collector_graph_changes_work_duration: - For garbage_collector_orphan_processing_latency_microseconds: - For namespace_queue_latency: - For namespace_queue_latency_sum: - For namespace_queue_latency_count: - For namespace_retries: - For namespace_work_duration: - For namespace_work_duration_sum: - For namespace_work_duration_count: - For function_duration_seconds: - For errors_total: - For evicted_pods_total: + [It] updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:124 + STEP: Creating projection with configMap that has name projected-configmap-test-upd-8b7567d7-2a9f-4dad-9e60-ffe4121282c8 08/24/23 12:05:57.534 + STEP: Creating the pod 08/24/23 12:05:57.54 + Aug 24 12:05:57.554: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b" in namespace "projected-5905" to be "running and ready" + Aug 24 12:05:57.560: INFO: Pod "pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b": Phase="Pending", Reason="", readiness=false. Elapsed: 6.274666ms + Aug 24 12:05:57.560: INFO: The phase of Pod pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:05:59.569: INFO: Pod "pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b": Phase="Running", Reason="", readiness=true. Elapsed: 2.015087636s + Aug 24 12:05:59.569: INFO: The phase of Pod pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b is Running (Ready = true) + Aug 24 12:05:59.569: INFO: Pod "pod-projected-configmaps-e2ca7f0b-448b-46c0-a0b8-662cd7ed797b" satisfied condition "running and ready" + STEP: Updating configmap projected-configmap-test-upd-8b7567d7-2a9f-4dad-9e60-ffe4121282c8 08/24/23 12:05:59.585 + STEP: waiting to observe update in volume 08/24/23 12:05:59.594 + [AfterEach] [sig-storage] Projected configMap + test/e2e/framework/node/init/init.go:32 + Aug 24 12:06:01.619: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected configMap + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-storage] Projected configMap + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-storage] Projected configMap + tear down framework | framework.go:193 + STEP: Destroying namespace "projected-5905" for this suite. 08/24/23 12:06:01.628 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSS +------------------------------ +[sig-api-machinery] ResourceQuota + should verify ResourceQuota with terminating scopes. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:690 +[BeforeEach] [sig-api-machinery] ResourceQuota + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:06:01.641 +Aug 24 12:06:01.641: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:06:01.646 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:01.678 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:01.685 +[BeforeEach] [sig-api-machinery] ResourceQuota + test/e2e/framework/metrics/init/init.go:31 +[It] should verify ResourceQuota with terminating scopes. [Conformance] + test/e2e/apimachinery/resource_quota.go:690 +STEP: Creating a ResourceQuota with terminating scope 08/24/23 12:06:01.691 +STEP: Ensuring ResourceQuota status is calculated 08/24/23 12:06:01.699 +STEP: Creating a ResourceQuota with not terminating scope 08/24/23 12:06:03.712 +STEP: Ensuring ResourceQuota status is calculated 08/24/23 12:06:03.722 +STEP: Creating a long running pod 08/24/23 12:06:05.732 +STEP: Ensuring resource quota with not terminating scope captures the pod usage 08/24/23 12:06:05.762 +STEP: Ensuring resource quota with terminating scope ignored the pod usage 08/24/23 12:06:07.772 +STEP: Deleting the pod 08/24/23 12:06:09.778 +STEP: Ensuring resource quota status released the pod usage 08/24/23 12:06:09.802 +STEP: Creating a terminating pod 08/24/23 12:06:11.811 +STEP: Ensuring resource quota with terminating scope captures the pod usage 08/24/23 12:06:11.833 +STEP: Ensuring resource quota with not terminating scope ignored the pod usage 08/24/23 12:06:13.841 +STEP: Deleting the pod 08/24/23 12:06:15.851 +STEP: Ensuring resource quota status released the pod usage 08/24/23 12:06:15.886 +[AfterEach] [sig-api-machinery] ResourceQuota + test/e2e/framework/node/init/init.go:32 +Aug 24 12:06:17.895: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + tear down framework | framework.go:193 +STEP: Destroying namespace "resourcequota-749" for this suite. 08/24/23 12:06:17.907 +------------------------------ +• [SLOW TEST] [16.278 seconds] +[sig-api-machinery] ResourceQuota +test/e2e/apimachinery/framework.go:23 + should verify ResourceQuota with terminating scopes. [Conformance] + test/e2e/apimachinery/resource_quota.go:690 - [AfterEach] [sig-api-machinery] Garbage collector + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-api-machinery] ResourceQuota + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:06:01.641 + Aug 24 12:06:01.641: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:06:01.646 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:01.678 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:01.685 + [BeforeEach] [sig-api-machinery] ResourceQuota + test/e2e/framework/metrics/init/init.go:31 + [It] should verify ResourceQuota with terminating scopes. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:690 + STEP: Creating a ResourceQuota with terminating scope 08/24/23 12:06:01.691 + STEP: Ensuring ResourceQuota status is calculated 08/24/23 12:06:01.699 + STEP: Creating a ResourceQuota with not terminating scope 08/24/23 12:06:03.712 + STEP: Ensuring ResourceQuota status is calculated 08/24/23 12:06:03.722 + STEP: Creating a long running pod 08/24/23 12:06:05.732 + STEP: Ensuring resource quota with not terminating scope captures the pod usage 08/24/23 12:06:05.762 + STEP: Ensuring resource quota with terminating scope ignored the pod usage 08/24/23 12:06:07.772 + STEP: Deleting the pod 08/24/23 12:06:09.778 + STEP: Ensuring resource quota status released the pod usage 08/24/23 12:06:09.802 + STEP: Creating a terminating pod 08/24/23 12:06:11.811 + STEP: Ensuring resource quota with terminating scope captures the pod usage 08/24/23 12:06:11.833 + STEP: Ensuring resource quota with not terminating scope ignored the pod usage 08/24/23 12:06:13.841 + STEP: Deleting the pod 08/24/23 12:06:15.851 + STEP: Ensuring resource quota status released the pod usage 08/24/23 12:06:15.886 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:02:23.170: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + Aug 24 12:06:17.895: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "gc-1570" for this suite. 07/29/23 16:02:23.18 + STEP: Destroying namespace "resourcequota-749" for this suite. 
08/24/23 12:06:17.907 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Subpath Atomic writer volumes - should support subpaths with downward pod [Conformance] - test/e2e/storage/subpath.go:92 -[BeforeEach] [sig-storage] Subpath +[sig-apps] DisruptionController + should create a PodDisruptionBudget [Conformance] + test/e2e/apps/disruption.go:108 +[BeforeEach] [sig-apps] DisruptionController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:02:23.201 -Jul 29 16:02:23.202: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename subpath 07/29/23 16:02:23.204 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:23.244 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:23.248 -[BeforeEach] [sig-storage] Subpath +STEP: Creating a kubernetes client 08/24/23 12:06:17.926 +Aug 24 12:06:17.926: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename disruption 08/24/23 12:06:17.929 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:18.001 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:18.006 +[BeforeEach] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 -STEP: Setting up data 07/29/23 16:02:23.253 -[It] should support subpaths with downward pod [Conformance] - test/e2e/storage/subpath.go:92 -STEP: Creating pod pod-subpath-test-downwardapi-xbgv 07/29/23 16:02:23.273 -STEP: Creating a pod to test atomic-volume-subpath 07/29/23 16:02:23.273 -Jul 29 16:02:23.287: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-xbgv" in namespace "subpath-6816" to be "Succeeded or Failed" -Jul 29 16:02:23.292: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Pending", Reason="", readiness=false. Elapsed: 5.019787ms -Jul 29 16:02:25.304: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 2.017062358s -Jul 29 16:02:27.303: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 4.016790244s -Jul 29 16:02:29.301: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 6.014343797s -Jul 29 16:02:31.303: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 8.016350806s -Jul 29 16:02:33.300: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 10.012926735s -Jul 29 16:02:35.302: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 12.015179046s -Jul 29 16:02:37.308: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 14.021106017s -Jul 29 16:02:39.299: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 16.01223641s -Jul 29 16:02:41.299: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 18.01196846s -Jul 29 16:02:43.297: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. 
Elapsed: 20.010729212s -Jul 29 16:02:45.300: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=false. Elapsed: 22.013445416s -Jul 29 16:02:47.300: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.013743181s -STEP: Saw pod success 07/29/23 16:02:47.301 -Jul 29 16:02:47.301: INFO: Pod "pod-subpath-test-downwardapi-xbgv" satisfied condition "Succeeded or Failed" -Jul 29 16:02:47.313: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-downwardapi-xbgv container test-container-subpath-downwardapi-xbgv: -STEP: delete the pod 07/29/23 16:02:47.342 -Jul 29 16:02:47.366: INFO: Waiting for pod pod-subpath-test-downwardapi-xbgv to disappear -Jul 29 16:02:47.373: INFO: Pod pod-subpath-test-downwardapi-xbgv no longer exists -STEP: Deleting pod pod-subpath-test-downwardapi-xbgv 07/29/23 16:02:47.373 -Jul 29 16:02:47.373: INFO: Deleting pod "pod-subpath-test-downwardapi-xbgv" in namespace "subpath-6816" -[AfterEach] [sig-storage] Subpath +[BeforeEach] [sig-apps] DisruptionController + test/e2e/apps/disruption.go:72 +[It] should create a PodDisruptionBudget [Conformance] + test/e2e/apps/disruption.go:108 +STEP: creating the pdb 08/24/23 12:06:18.012 +STEP: Waiting for the pdb to be processed 08/24/23 12:06:18.021 +STEP: updating the pdb 08/24/23 12:06:20.034 +STEP: Waiting for the pdb to be processed 08/24/23 12:06:20.052 +STEP: patching the pdb 08/24/23 12:06:22.067 +STEP: Waiting for the pdb to be processed 08/24/23 12:06:22.084 +STEP: Waiting for the pdb to be deleted 08/24/23 12:06:24.113 +[AfterEach] [sig-apps] DisruptionController test/e2e/framework/node/init/init.go:32 -Jul 29 16:02:47.379: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Subpath +Aug 24 12:06:24.120: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-apps] DisruptionController dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-apps] DisruptionController tear down framework | framework.go:193 -STEP: Destroying namespace "subpath-6816" for this suite. 07/29/23 16:02:47.387 +STEP: Destroying namespace "disruption-3539" for this suite. 
08/24/23 12:06:24.126 ------------------------------ -• [SLOW TEST] [24.196 seconds] -[sig-storage] Subpath -test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - test/e2e/storage/subpath.go:36 - should support subpaths with downward pod [Conformance] - test/e2e/storage/subpath.go:92 +• [SLOW TEST] [6.210 seconds] +[sig-apps] DisruptionController +test/e2e/apps/framework.go:23 + should create a PodDisruptionBudget [Conformance] + test/e2e/apps/disruption.go:108 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Subpath + [BeforeEach] [sig-apps] DisruptionController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:02:23.201 - Jul 29 16:02:23.202: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename subpath 07/29/23 16:02:23.204 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:23.244 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:23.248 - [BeforeEach] [sig-storage] Subpath + STEP: Creating a kubernetes client 08/24/23 12:06:17.926 + Aug 24 12:06:17.926: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename disruption 08/24/23 12:06:17.929 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:18.001 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:18.006 + [BeforeEach] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 - STEP: Setting up data 07/29/23 16:02:23.253 - [It] should support subpaths with downward pod [Conformance] - test/e2e/storage/subpath.go:92 - STEP: Creating pod pod-subpath-test-downwardapi-xbgv 07/29/23 16:02:23.273 - STEP: Creating a pod to test atomic-volume-subpath 07/29/23 16:02:23.273 - Jul 29 16:02:23.287: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-xbgv" in namespace "subpath-6816" to be "Succeeded or Failed" - Jul 29 16:02:23.292: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Pending", Reason="", readiness=false. Elapsed: 5.019787ms - Jul 29 16:02:25.304: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 2.017062358s - Jul 29 16:02:27.303: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 4.016790244s - Jul 29 16:02:29.301: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 6.014343797s - Jul 29 16:02:31.303: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 8.016350806s - Jul 29 16:02:33.300: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 10.012926735s - Jul 29 16:02:35.302: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 12.015179046s - Jul 29 16:02:37.308: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 14.021106017s - Jul 29 16:02:39.299: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 16.01223641s - Jul 29 16:02:41.299: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. Elapsed: 18.01196846s - Jul 29 16:02:43.297: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=true. 
Elapsed: 20.010729212s - Jul 29 16:02:45.300: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Running", Reason="", readiness=false. Elapsed: 22.013445416s - Jul 29 16:02:47.300: INFO: Pod "pod-subpath-test-downwardapi-xbgv": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.013743181s - STEP: Saw pod success 07/29/23 16:02:47.301 - Jul 29 16:02:47.301: INFO: Pod "pod-subpath-test-downwardapi-xbgv" satisfied condition "Succeeded or Failed" - Jul 29 16:02:47.313: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-downwardapi-xbgv container test-container-subpath-downwardapi-xbgv: - STEP: delete the pod 07/29/23 16:02:47.342 - Jul 29 16:02:47.366: INFO: Waiting for pod pod-subpath-test-downwardapi-xbgv to disappear - Jul 29 16:02:47.373: INFO: Pod pod-subpath-test-downwardapi-xbgv no longer exists - STEP: Deleting pod pod-subpath-test-downwardapi-xbgv 07/29/23 16:02:47.373 - Jul 29 16:02:47.373: INFO: Deleting pod "pod-subpath-test-downwardapi-xbgv" in namespace "subpath-6816" - [AfterEach] [sig-storage] Subpath + [BeforeEach] [sig-apps] DisruptionController + test/e2e/apps/disruption.go:72 + [It] should create a PodDisruptionBudget [Conformance] + test/e2e/apps/disruption.go:108 + STEP: creating the pdb 08/24/23 12:06:18.012 + STEP: Waiting for the pdb to be processed 08/24/23 12:06:18.021 + STEP: updating the pdb 08/24/23 12:06:20.034 + STEP: Waiting for the pdb to be processed 08/24/23 12:06:20.052 + STEP: patching the pdb 08/24/23 12:06:22.067 + STEP: Waiting for the pdb to be processed 08/24/23 12:06:22.084 + STEP: Waiting for the pdb to be deleted 08/24/23 12:06:24.113 + [AfterEach] [sig-apps] DisruptionController test/e2e/framework/node/init/init.go:32 - Jul 29 16:02:47.379: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Subpath + Aug 24 12:06:24.120: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-apps] DisruptionController dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-apps] DisruptionController tear down framework | framework.go:193 - STEP: Destroying namespace "subpath-6816" for this suite. 07/29/23 16:02:47.387 + STEP: Destroying namespace "disruption-3539" for this suite. 
08/24/23 12:06:24.126 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected secret - should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:119 -[BeforeEach] [sig-storage] Projected secret +[sig-api-machinery] Aggregator + Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] + test/e2e/apimachinery/aggregator.go:100 +[BeforeEach] [sig-api-machinery] Aggregator set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:02:47.403 -Jul 29 16:02:47.403: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:02:47.406 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:47.44 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:47.447 -[BeforeEach] [sig-storage] Projected secret +STEP: Creating a kubernetes client 08/24/23 12:06:24.15 +Aug 24 12:06:24.150: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename aggregator 08/24/23 12:06:24.152 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:24.179 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:24.184 +[BeforeEach] [sig-api-machinery] Aggregator test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:119 -STEP: Creating secret with name projected-secret-test-ebf58bf0-bf6f-4d36-b08f-ef3041f1e01f 07/29/23 16:02:47.456 -STEP: Creating a pod to test consume secrets 07/29/23 16:02:47.465 -Jul 29 16:02:47.479: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a" in namespace "projected-8066" to be "Succeeded or Failed" -Jul 29 16:02:47.486: INFO: Pod "pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.351971ms -Jul 29 16:02:49.502: INFO: Pod "pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02210185s -Jul 29 16:02:51.496: INFO: Pod "pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015880979s -STEP: Saw pod success 07/29/23 16:02:51.496 -Jul 29 16:02:51.496: INFO: Pod "pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a" satisfied condition "Succeeded or Failed" -Jul 29 16:02:51.504: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a container secret-volume-test: -STEP: delete the pod 07/29/23 16:02:51.517 -Jul 29 16:02:51.548: INFO: Waiting for pod pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a to disappear -Jul 29 16:02:51.555: INFO: Pod pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a no longer exists -[AfterEach] [sig-storage] Projected secret +[BeforeEach] [sig-api-machinery] Aggregator + test/e2e/apimachinery/aggregator.go:78 +Aug 24 12:06:24.188: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +[It] Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] + test/e2e/apimachinery/aggregator.go:100 +STEP: Registering the sample API server. 
08/24/23 12:06:24.189 +Aug 24 12:06:25.036: INFO: new replicaset for deployment "sample-apiserver-deployment" is yet to be created +Aug 24 12:06:27.136: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:29.145: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:31.143: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:33.148: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet 
\"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:35.144: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:37.145: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:39.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:41.142: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is 
progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:43.143: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:45.146: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:47.143: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:06:49.156: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} 
+Aug 24 12:06:51.303: INFO: Waited 144.268942ms for the sample-apiserver to be ready to handle requests. +STEP: Read Status for v1alpha1.wardle.example.com 08/24/23 12:06:51.434 +STEP: kubectl patch apiservice v1alpha1.wardle.example.com -p '{"spec":{"versionPriority": 400}}' 08/24/23 12:06:51.444 +STEP: List APIServices 08/24/23 12:06:51.466 +Aug 24 12:06:51.482: INFO: Found v1alpha1.wardle.example.com in APIServiceList +[AfterEach] [sig-api-machinery] Aggregator + test/e2e/apimachinery/aggregator.go:68 +[AfterEach] [sig-api-machinery] Aggregator test/e2e/framework/node/init/init.go:32 -Jul 29 16:02:51.556: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected secret +Aug 24 12:06:51.713: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Aggregator test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-api-machinery] Aggregator dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-api-machinery] Aggregator tear down framework | framework.go:193 -STEP: Destroying namespace "projected-8066" for this suite. 07/29/23 16:02:51.568 +STEP: Destroying namespace "aggregator-4095" for this suite. 08/24/23 12:06:51.736 ------------------------------ -• [4.178 seconds] -[sig-storage] Projected secret -test/e2e/common/storage/framework.go:23 - should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:119 +• [SLOW TEST] [27.606 seconds] +[sig-api-machinery] Aggregator +test/e2e/apimachinery/framework.go:23 + Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] + test/e2e/apimachinery/aggregator.go:100 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected secret + [BeforeEach] [sig-api-machinery] Aggregator set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:02:47.403 - Jul 29 16:02:47.403: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:02:47.406 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:47.44 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:47.447 - [BeforeEach] [sig-storage] Projected secret + STEP: Creating a kubernetes client 08/24/23 12:06:24.15 + Aug 24 12:06:24.150: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename aggregator 08/24/23 12:06:24.152 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:24.179 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:24.184 + [BeforeEach] [sig-api-machinery] Aggregator test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:119 - STEP: Creating secret with name projected-secret-test-ebf58bf0-bf6f-4d36-b08f-ef3041f1e01f 07/29/23 16:02:47.456 - STEP: Creating a pod to test consume secrets 07/29/23 16:02:47.465 - Jul 29 16:02:47.479: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a" in namespace "projected-8066" to be "Succeeded or Failed" - Jul 29 16:02:47.486: INFO: Pod 
"pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.351971ms - Jul 29 16:02:49.502: INFO: Pod "pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02210185s - Jul 29 16:02:51.496: INFO: Pod "pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015880979s - STEP: Saw pod success 07/29/23 16:02:51.496 - Jul 29 16:02:51.496: INFO: Pod "pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a" satisfied condition "Succeeded or Failed" - Jul 29 16:02:51.504: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a container secret-volume-test: - STEP: delete the pod 07/29/23 16:02:51.517 - Jul 29 16:02:51.548: INFO: Waiting for pod pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a to disappear - Jul 29 16:02:51.555: INFO: Pod pod-projected-secrets-b676645c-ffb0-46cc-9f3e-cadfec51183a no longer exists - [AfterEach] [sig-storage] Projected secret + [BeforeEach] [sig-api-machinery] Aggregator + test/e2e/apimachinery/aggregator.go:78 + Aug 24 12:06:24.188: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + [It] Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] + test/e2e/apimachinery/aggregator.go:100 + STEP: Registering the sample API server. 08/24/23 12:06:24.189 + Aug 24 12:06:25.036: INFO: new replicaset for deployment "sample-apiserver-deployment" is yet to be created + Aug 24 12:06:27.136: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:29.145: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:31.143: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:33.148: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:35.144: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:37.145: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:39.152: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:41.142: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:43.143: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:45.146: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:47.143: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:49.156: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 6, 25, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-55bd96fd47\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:06:51.303: INFO: Waited 144.268942ms for the sample-apiserver to be ready to handle requests. + STEP: Read Status for v1alpha1.wardle.example.com 08/24/23 12:06:51.434 + STEP: kubectl patch apiservice v1alpha1.wardle.example.com -p '{"spec":{"versionPriority": 400}}' 08/24/23 12:06:51.444 + STEP: List APIServices 08/24/23 12:06:51.466 + Aug 24 12:06:51.482: INFO: Found v1alpha1.wardle.example.com in APIServiceList + [AfterEach] [sig-api-machinery] Aggregator + test/e2e/apimachinery/aggregator.go:68 + [AfterEach] [sig-api-machinery] Aggregator test/e2e/framework/node/init/init.go:32 - Jul 29 16:02:51.556: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected secret + Aug 24 12:06:51.713: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Aggregator test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected secret + [DeferCleanup (Each)] [sig-api-machinery] Aggregator dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected secret + [DeferCleanup (Each)] [sig-api-machinery] Aggregator tear down framework | framework.go:193 - STEP: Destroying namespace "projected-8066" for this suite. 07/29/23 16:02:51.568 + STEP: Destroying namespace "aggregator-4095" for this suite. 
08/24/23 12:06:51.736 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SS ------------------------------ -[sig-storage] ConfigMap - should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:109 -[BeforeEach] [sig-storage] ConfigMap +[sig-instrumentation] Events + should delete a collection of events [Conformance] + test/e2e/instrumentation/core_events.go:175 +[BeforeEach] [sig-instrumentation] Events set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:02:51.584 -Jul 29 16:02:51.584: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:02:51.588 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:51.636 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:51.641 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 12:06:51.76 +Aug 24 12:06:51.760: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename events 08/24/23 12:06:51.766 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:51.823 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:51.836 +[BeforeEach] [sig-instrumentation] Events test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:109 -STEP: Creating configMap with name configmap-test-volume-map-ebe4bffd-8b5d-4443-b2ed-aea55ff3d7b9 07/29/23 16:02:51.647 -STEP: Creating a pod to test consume configMaps 07/29/23 16:02:51.656 -Jul 29 16:02:51.696: INFO: Waiting up to 5m0s for pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192" in namespace "configmap-8893" to be "Succeeded or Failed" -Jul 29 16:02:51.704: INFO: Pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192": Phase="Pending", Reason="", readiness=false. Elapsed: 7.813797ms -Jul 29 16:02:53.712: INFO: Pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016218955s -Jul 29 16:02:55.713: INFO: Pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.016286077s -STEP: Saw pod success 07/29/23 16:02:55.713 -Jul 29 16:02:55.713: INFO: Pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192" satisfied condition "Succeeded or Failed" -Jul 29 16:02:55.732: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192 container agnhost-container: -STEP: delete the pod 07/29/23 16:02:55.751 -Jul 29 16:02:55.783: INFO: Waiting for pod pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192 to disappear -Jul 29 16:02:55.789: INFO: Pod pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192 no longer exists -[AfterEach] [sig-storage] ConfigMap +[It] should delete a collection of events [Conformance] + test/e2e/instrumentation/core_events.go:175 +STEP: Create set of events 08/24/23 12:06:51.845 +Aug 24 12:06:51.855: INFO: created test-event-1 +Aug 24 12:06:51.869: INFO: created test-event-2 +Aug 24 12:06:51.880: INFO: created test-event-3 +STEP: get a list of Events with a label in the current namespace 08/24/23 12:06:51.88 +STEP: delete collection of events 08/24/23 12:06:51.886 +Aug 24 12:06:51.887: INFO: requesting DeleteCollection of events +STEP: check that the list of events matches the requested quantity 08/24/23 12:06:51.95 +Aug 24 12:06:51.950: INFO: requesting list of events to confirm quantity +[AfterEach] [sig-instrumentation] Events test/e2e/framework/node/init/init.go:32 -Jul 29 16:02:55.790: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 12:06:51.962: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-instrumentation] Events test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-instrumentation] Events dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-instrumentation] Events tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-8893" for this suite. 07/29/23 16:02:55.802 +STEP: Destroying namespace "events-7681" for this suite. 
08/24/23 12:06:51.973 ------------------------------ -• [4.232 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:109 +• [0.233 seconds] +[sig-instrumentation] Events +test/e2e/instrumentation/common/framework.go:23 + should delete a collection of events [Conformance] + test/e2e/instrumentation/core_events.go:175 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-instrumentation] Events set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:02:51.584 - Jul 29 16:02:51.584: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:02:51.588 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:51.636 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:51.641 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 12:06:51.76 + Aug 24 12:06:51.760: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename events 08/24/23 12:06:51.766 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:51.823 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:51.836 + [BeforeEach] [sig-instrumentation] Events test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:109 - STEP: Creating configMap with name configmap-test-volume-map-ebe4bffd-8b5d-4443-b2ed-aea55ff3d7b9 07/29/23 16:02:51.647 - STEP: Creating a pod to test consume configMaps 07/29/23 16:02:51.656 - Jul 29 16:02:51.696: INFO: Waiting up to 5m0s for pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192" in namespace "configmap-8893" to be "Succeeded or Failed" - Jul 29 16:02:51.704: INFO: Pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192": Phase="Pending", Reason="", readiness=false. Elapsed: 7.813797ms - Jul 29 16:02:53.712: INFO: Pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016218955s - Jul 29 16:02:55.713: INFO: Pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.016286077s - STEP: Saw pod success 07/29/23 16:02:55.713 - Jul 29 16:02:55.713: INFO: Pod "pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192" satisfied condition "Succeeded or Failed" - Jul 29 16:02:55.732: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192 container agnhost-container: - STEP: delete the pod 07/29/23 16:02:55.751 - Jul 29 16:02:55.783: INFO: Waiting for pod pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192 to disappear - Jul 29 16:02:55.789: INFO: Pod pod-configmaps-20a4d987-3299-4007-bce0-2aedfadfa192 no longer exists - [AfterEach] [sig-storage] ConfigMap + [It] should delete a collection of events [Conformance] + test/e2e/instrumentation/core_events.go:175 + STEP: Create set of events 08/24/23 12:06:51.845 + Aug 24 12:06:51.855: INFO: created test-event-1 + Aug 24 12:06:51.869: INFO: created test-event-2 + Aug 24 12:06:51.880: INFO: created test-event-3 + STEP: get a list of Events with a label in the current namespace 08/24/23 12:06:51.88 + STEP: delete collection of events 08/24/23 12:06:51.886 + Aug 24 12:06:51.887: INFO: requesting DeleteCollection of events + STEP: check that the list of events matches the requested quantity 08/24/23 12:06:51.95 + Aug 24 12:06:51.950: INFO: requesting list of events to confirm quantity + [AfterEach] [sig-instrumentation] Events test/e2e/framework/node/init/init.go:32 - Jul 29 16:02:55.790: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 12:06:51.962: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-instrumentation] Events test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-instrumentation] Events dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-instrumentation] Events tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-8893" for this suite. 07/29/23 16:02:55.802 + STEP: Destroying namespace "events-7681" for this suite. 08/24/23 12:06:51.973 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSS +SSSSSSSS ------------------------------ -[sig-auth] ServiceAccounts - ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] - test/e2e/auth/service_accounts.go:531 -[BeforeEach] [sig-auth] ServiceAccounts +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a service. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:100 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:02:55.819 -Jul 29 16:02:55.819: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename svcaccounts 07/29/23 16:02:55.821 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:55.85 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:55.855 -[BeforeEach] [sig-auth] ServiceAccounts +STEP: Creating a kubernetes client 08/24/23 12:06:51.993 +Aug 24 12:06:51.993: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:06:51.996 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:52.033 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:52.041 +[BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[It] ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] - test/e2e/auth/service_accounts.go:531 -Jul 29 16:02:55.892: INFO: created pod -Jul 29 16:02:55.892: INFO: Waiting up to 5m0s for pod "oidc-discovery-validator" in namespace "svcaccounts-3967" to be "Succeeded or Failed" -Jul 29 16:02:55.904: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 12.285696ms -Jul 29 16:02:57.912: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020011941s -Jul 29 16:02:59.915: INFO: Pod "oidc-discovery-validator": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.023587754s -STEP: Saw pod success 07/29/23 16:02:59.916 -Jul 29 16:02:59.916: INFO: Pod "oidc-discovery-validator" satisfied condition "Succeeded or Failed" -Jul 29 16:03:29.917: INFO: polling logs -Jul 29 16:03:29.939: INFO: Pod logs: -I0729 16:02:56.947152 1 log.go:198] OK: Got token -I0729 16:02:56.947305 1 log.go:198] validating with in-cluster discovery -I0729 16:02:56.948079 1 log.go:198] OK: got issuer https://kubernetes.default.svc.cluster.local -I0729 16:02:56.948148 1 log.go:198] Full, not-validated claims: -openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-3967:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1690647176, NotBefore:1690646576, IssuedAt:1690646576, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-3967", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"517d1e82-15d6-46c5-b850-c102f0082e18"}}} -I0729 16:02:56.982414 1 log.go:198] OK: Constructed OIDC provider for issuer https://kubernetes.default.svc.cluster.local -I0729 16:02:57.001466 1 log.go:198] OK: Validated signature on JWT -I0729 16:02:57.001613 1 log.go:198] OK: Got valid claims from token! 
-I0729 16:02:57.001670 1 log.go:198] Full, validated claims: -&openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-3967:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1690647176, NotBefore:1690646576, IssuedAt:1690646576, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-3967", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"517d1e82-15d6-46c5-b850-c102f0082e18"}}} - -Jul 29 16:03:29.939: INFO: completed pod -[AfterEach] [sig-auth] ServiceAccounts +[It] should create a ResourceQuota and capture the life of a service. [Conformance] + test/e2e/apimachinery/resource_quota.go:100 +STEP: Counting existing ResourceQuota 08/24/23 12:06:52.047 +STEP: Creating a ResourceQuota 08/24/23 12:06:57.056 +STEP: Ensuring resource quota status is calculated 08/24/23 12:06:57.065 +STEP: Creating a Service 08/24/23 12:06:59.074 +STEP: Creating a NodePort Service 08/24/23 12:06:59.109 +STEP: Not allowing a LoadBalancer Service with NodePort to be created that exceeds remaining quota 08/24/23 12:06:59.172 +STEP: Ensuring resource quota status captures service creation 08/24/23 12:06:59.245 +STEP: Deleting Services 08/24/23 12:07:01.252 +STEP: Ensuring resource quota status released usage 08/24/23 12:07:01.337 +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:03:29.952: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +Aug 24 12:07:03.349: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "svcaccounts-3967" for this suite. 07/29/23 16:03:29.965 +STEP: Destroying namespace "resourcequota-871" for this suite. 08/24/23 12:07:03.361 ------------------------------ -• [SLOW TEST] [34.165 seconds] -[sig-auth] ServiceAccounts -test/e2e/auth/framework.go:23 - ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] - test/e2e/auth/service_accounts.go:531 +• [SLOW TEST] [11.402 seconds] +[sig-api-machinery] ResourceQuota +test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a service. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:100 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-auth] ServiceAccounts + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:02:55.819 - Jul 29 16:02:55.819: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename svcaccounts 07/29/23 16:02:55.821 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:02:55.85 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:02:55.855 - [BeforeEach] [sig-auth] ServiceAccounts + STEP: Creating a kubernetes client 08/24/23 12:06:51.993 + Aug 24 12:06:51.993: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:06:51.996 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:06:52.033 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:06:52.041 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [It] ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] - test/e2e/auth/service_accounts.go:531 - Jul 29 16:02:55.892: INFO: created pod - Jul 29 16:02:55.892: INFO: Waiting up to 5m0s for pod "oidc-discovery-validator" in namespace "svcaccounts-3967" to be "Succeeded or Failed" - Jul 29 16:02:55.904: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 12.285696ms - Jul 29 16:02:57.912: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020011941s - Jul 29 16:02:59.915: INFO: Pod "oidc-discovery-validator": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.023587754s - STEP: Saw pod success 07/29/23 16:02:59.916 - Jul 29 16:02:59.916: INFO: Pod "oidc-discovery-validator" satisfied condition "Succeeded or Failed" - Jul 29 16:03:29.917: INFO: polling logs - Jul 29 16:03:29.939: INFO: Pod logs: - I0729 16:02:56.947152 1 log.go:198] OK: Got token - I0729 16:02:56.947305 1 log.go:198] validating with in-cluster discovery - I0729 16:02:56.948079 1 log.go:198] OK: got issuer https://kubernetes.default.svc.cluster.local - I0729 16:02:56.948148 1 log.go:198] Full, not-validated claims: - openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-3967:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1690647176, NotBefore:1690646576, IssuedAt:1690646576, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-3967", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"517d1e82-15d6-46c5-b850-c102f0082e18"}}} - I0729 16:02:56.982414 1 log.go:198] OK: Constructed OIDC provider for issuer https://kubernetes.default.svc.cluster.local - I0729 16:02:57.001466 1 log.go:198] OK: Validated signature on JWT - I0729 16:02:57.001613 1 log.go:198] OK: Got valid claims from token! 
- I0729 16:02:57.001670 1 log.go:198] Full, validated claims: - &openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-3967:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1690647176, NotBefore:1690646576, IssuedAt:1690646576, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-3967", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"517d1e82-15d6-46c5-b850-c102f0082e18"}}} - - Jul 29 16:03:29.939: INFO: completed pod - [AfterEach] [sig-auth] ServiceAccounts + [It] should create a ResourceQuota and capture the life of a service. [Conformance] + test/e2e/apimachinery/resource_quota.go:100 + STEP: Counting existing ResourceQuota 08/24/23 12:06:52.047 + STEP: Creating a ResourceQuota 08/24/23 12:06:57.056 + STEP: Ensuring resource quota status is calculated 08/24/23 12:06:57.065 + STEP: Creating a Service 08/24/23 12:06:59.074 + STEP: Creating a NodePort Service 08/24/23 12:06:59.109 + STEP: Not allowing a LoadBalancer Service with NodePort to be created that exceeds remaining quota 08/24/23 12:06:59.172 + STEP: Ensuring resource quota status captures service creation 08/24/23 12:06:59.245 + STEP: Deleting Services 08/24/23 12:07:01.252 + STEP: Ensuring resource quota status released usage 08/24/23 12:07:01.337 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:03:29.952: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + Aug 24 12:07:03.349: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "svcaccounts-3967" for this suite. 07/29/23 16:03:29.965 + STEP: Destroying namespace "resourcequota-871" for this suite. 
08/24/23 12:07:03.361 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl run pod - should create a pod from an image when restart is Never [Conformance] - test/e2e/kubectl/kubectl.go:1713 -[BeforeEach] [sig-cli] Kubectl client +[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath + runs ReplicaSets to verify preemption running path [Conformance] + test/e2e/scheduling/preemption.go:624 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:07:03.404 +Aug 24 12:07:03.404: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename sched-preemption 08/24/23 12:07:03.408 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:07:03.463 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:07:03.468 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:97 +Aug 24 12:07:03.501: INFO: Waiting up to 1m0s for all nodes to be ready +Aug 24 12:08:03.586: INFO: Waiting for terminating namespaces to be deleted... +[BeforeEach] PreemptionExecutionPath set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:03:29.993 -Jul 29 16:03:29.993: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:03:29.997 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:03:30.036 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:03:30.041 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 12:08:03.594 +Aug 24 12:08:03.595: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename sched-preemption-path 08/24/23 12:08:03.598 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:03.644 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:03.65 +[BeforeEach] PreemptionExecutionPath test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[BeforeEach] Kubectl run pod - test/e2e/kubectl/kubectl.go:1700 -[It] should create a pod from an image when restart is Never [Conformance] - test/e2e/kubectl/kubectl.go:1713 -STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 07/29/23 16:03:30.045 -Jul 29 16:03:30.046: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9766 run e2e-test-httpd-pod --restart=Never --pod-running-timeout=2m0s --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4' -Jul 29 16:03:30.217: INFO: stderr: "" -Jul 29 16:03:30.217: INFO: stdout: "pod/e2e-test-httpd-pod created\n" -STEP: verifying the pod e2e-test-httpd-pod was created 07/29/23 16:03:30.217 -[AfterEach] Kubectl run pod - test/e2e/kubectl/kubectl.go:1704 -Jul 29 16:03:30.226: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9766 delete pods e2e-test-httpd-pod' -Jul 29 16:03:32.471: INFO: stderr: "" -Jul 29 16:03:32.471: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" -[AfterEach] [sig-cli] Kubectl client +[BeforeEach] PreemptionExecutionPath + 
test/e2e/scheduling/preemption.go:576 +STEP: Finding an available node 08/24/23 12:08:03.657 +STEP: Trying to launch a pod without a label to get a node which can launch it. 08/24/23 12:08:03.657 +Aug 24 12:08:03.673: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-preemption-path-2018" to be "running" +Aug 24 12:08:03.682: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 8.189073ms +Aug 24 12:08:05.690: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.016628331s +Aug 24 12:08:05.690: INFO: Pod "without-label" satisfied condition "running" +STEP: Explicitly delete pod here to free the resource it takes. 08/24/23 12:08:05.697 +Aug 24 12:08:05.720: INFO: found a healthy node: pe9deep4seen-3 +[It] runs ReplicaSets to verify preemption running path [Conformance] + test/e2e/scheduling/preemption.go:624 +Aug 24 12:08:11.873: INFO: pods created so far: [1 1 1] +Aug 24 12:08:11.874: INFO: length of pods created so far: 3 +Aug 24 12:08:13.893: INFO: pods created so far: [2 2 1] +[AfterEach] PreemptionExecutionPath test/e2e/framework/node/init/init.go:32 -Jul 29 16:03:32.471: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 12:08:20.894: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] PreemptionExecutionPath + test/e2e/scheduling/preemption.go:549 +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/framework/node/init/init.go:32 +Aug 24 12:08:20.964: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:84 +[DeferCleanup (Each)] PreemptionExecutionPath test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] PreemptionExecutionPath dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] PreemptionExecutionPath + tear down framework | framework.go:193 +STEP: Destroying namespace "sched-preemption-path-2018" for this suite. 08/24/23 12:08:21.055 +[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-9766" for this suite. 07/29/23 16:03:32.478 +STEP: Destroying namespace "sched-preemption-3895" for this suite. 
08/24/23 12:08:21.07 ------------------------------ -• [2.499 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Kubectl run pod - test/e2e/kubectl/kubectl.go:1697 - should create a pod from an image when restart is Never [Conformance] - test/e2e/kubectl/kubectl.go:1713 +• [SLOW TEST] [77.681 seconds] +[sig-scheduling] SchedulerPreemption [Serial] +test/e2e/scheduling/framework.go:40 + PreemptionExecutionPath + test/e2e/scheduling/preemption.go:537 + runs ReplicaSets to verify preemption running path [Conformance] + test/e2e/scheduling/preemption.go:624 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:03:29.993 - Jul 29 16:03:29.993: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 16:03:29.997 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:03:30.036 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:03:30.041 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 12:07:03.404 + Aug 24 12:07:03.404: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename sched-preemption 08/24/23 12:07:03.408 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:07:03.463 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:07:03.468 + [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [BeforeEach] Kubectl run pod - test/e2e/kubectl/kubectl.go:1700 - [It] should create a pod from an image when restart is Never [Conformance] - test/e2e/kubectl/kubectl.go:1713 - STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 07/29/23 16:03:30.045 - Jul 29 16:03:30.046: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9766 run e2e-test-httpd-pod --restart=Never --pod-running-timeout=2m0s --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4' - Jul 29 16:03:30.217: INFO: stderr: "" - Jul 29 16:03:30.217: INFO: stdout: "pod/e2e-test-httpd-pod created\n" - STEP: verifying the pod e2e-test-httpd-pod was created 07/29/23 16:03:30.217 - [AfterEach] Kubectl run pod - test/e2e/kubectl/kubectl.go:1704 - Jul 29 16:03:30.226: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9766 delete pods e2e-test-httpd-pod' - Jul 29 16:03:32.471: INFO: stderr: "" - Jul 29 16:03:32.471: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" - [AfterEach] [sig-cli] Kubectl client + [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:97 + Aug 24 12:07:03.501: INFO: Waiting up to 1m0s for all nodes to be ready + Aug 24 12:08:03.586: INFO: Waiting for terminating namespaces to be deleted... 
+ [BeforeEach] PreemptionExecutionPath + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:08:03.594 + Aug 24 12:08:03.595: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename sched-preemption-path 08/24/23 12:08:03.598 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:03.644 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:03.65 + [BeforeEach] PreemptionExecutionPath + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] PreemptionExecutionPath + test/e2e/scheduling/preemption.go:576 + STEP: Finding an available node 08/24/23 12:08:03.657 + STEP: Trying to launch a pod without a label to get a node which can launch it. 08/24/23 12:08:03.657 + Aug 24 12:08:03.673: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-preemption-path-2018" to be "running" + Aug 24 12:08:03.682: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 8.189073ms + Aug 24 12:08:05.690: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.016628331s + Aug 24 12:08:05.690: INFO: Pod "without-label" satisfied condition "running" + STEP: Explicitly delete pod here to free the resource it takes. 08/24/23 12:08:05.697 + Aug 24 12:08:05.720: INFO: found a healthy node: pe9deep4seen-3 + [It] runs ReplicaSets to verify preemption running path [Conformance] + test/e2e/scheduling/preemption.go:624 + Aug 24 12:08:11.873: INFO: pods created so far: [1 1 1] + Aug 24 12:08:11.874: INFO: length of pods created so far: 3 + Aug 24 12:08:13.893: INFO: pods created so far: [2 2 1] + [AfterEach] PreemptionExecutionPath test/e2e/framework/node/init/init.go:32 - Jul 29 16:03:32.471: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 12:08:20.894: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] PreemptionExecutionPath + test/e2e/scheduling/preemption.go:549 + [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/framework/node/init/init.go:32 + Aug 24 12:08:20.964: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:84 + [DeferCleanup (Each)] PreemptionExecutionPath test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] PreemptionExecutionPath dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] PreemptionExecutionPath + tear down framework | framework.go:193 + STEP: Destroying namespace "sched-preemption-path-2018" for this suite. 08/24/23 12:08:21.055 + [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-9766" for this suite. 07/29/23 16:03:32.478 + STEP: Destroying namespace "sched-preemption-3895" for this suite. 
08/24/23 12:08:21.07 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Pods - should delete a collection of pods [Conformance] - test/e2e/common/node/pods.go:845 -[BeforeEach] [sig-node] Pods +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for multiple CRDs of same group and version but different kinds [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:357 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:03:32.494 -Jul 29 16:03:32.494: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 16:03:32.497 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:03:32.528 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:03:32.532 -[BeforeEach] [sig-node] Pods +STEP: Creating a kubernetes client 08/24/23 12:08:21.094 +Aug 24 12:08:21.094: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:08:21.097 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:21.131 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:21.136 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should delete a collection of pods [Conformance] - test/e2e/common/node/pods.go:845 -STEP: Create set of pods 07/29/23 16:03:32.537 -Jul 29 16:03:32.551: INFO: created test-pod-1 -Jul 29 16:03:32.560: INFO: created test-pod-2 -Jul 29 16:03:32.576: INFO: created test-pod-3 -STEP: waiting for all 3 pods to be running 07/29/23 16:03:32.577 -Jul 29 16:03:32.577: INFO: Waiting up to 5m0s for all pods (need at least 3) in namespace 'pods-9917' to be running and ready -Jul 29 16:03:32.607: INFO: The status of Pod test-pod-1 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed -Jul 29 16:03:32.607: INFO: The status of Pod test-pod-2 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed -Jul 29 16:03:32.607: INFO: The status of Pod test-pod-3 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed -Jul 29 16:03:32.607: INFO: 0 / 3 pods in namespace 'pods-9917' are running and ready (0 seconds elapsed) -Jul 29 16:03:32.607: INFO: expected 0 pod replicas in namespace 'pods-9917', 0 are Running and Ready. 
-Jul 29 16:03:32.607: INFO: POD NODE PHASE GRACE CONDITIONS -Jul 29 16:03:32.607: INFO: test-pod-1 wetuj3nuajog-3 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC ContainersNotReady containers with unready status: [token-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC ContainersNotReady containers with unready status: [token-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC }] -Jul 29 16:03:32.608: INFO: test-pod-2 wetuj3nuajog-3 Pending [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC }] -Jul 29 16:03:32.608: INFO: test-pod-3 wetuj3nuajog-3 Pending [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC }] -Jul 29 16:03:32.608: INFO: -Jul 29 16:03:34.626: INFO: 3 / 3 pods in namespace 'pods-9917' are running and ready (2 seconds elapsed) -Jul 29 16:03:34.626: INFO: expected 0 pod replicas in namespace 'pods-9917', 0 are Running and Ready. -STEP: waiting for all pods to be deleted 07/29/23 16:03:34.662 -Jul 29 16:03:34.670: INFO: Pod quantity 3 is different from expected quantity 0 -Jul 29 16:03:35.692: INFO: Pod quantity 3 is different from expected quantity 0 -Jul 29 16:03:36.681: INFO: Pod quantity 3 is different from expected quantity 0 -Jul 29 16:03:37.684: INFO: Pod quantity 1 is different from expected quantity 0 -[AfterEach] [sig-node] Pods +[It] works for multiple CRDs of same group and version but different kinds [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:357 +STEP: CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation 08/24/23 12:08:21.142 +Aug 24 12:08:21.145: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:24.743: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:03:38.677: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods +Aug 24 12:08:34.853: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "pods-9917" for this suite. 07/29/23 16:03:38.685 +STEP: Destroying namespace "crd-publish-openapi-8912" for this suite. 
08/24/23 12:08:34.878 ------------------------------ -• [SLOW TEST] [6.203 seconds] -[sig-node] Pods -test/e2e/common/node/framework.go:23 - should delete a collection of pods [Conformance] - test/e2e/common/node/pods.go:845 +• [SLOW TEST] [13.801 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + works for multiple CRDs of same group and version but different kinds [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:357 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:03:32.494 - Jul 29 16:03:32.494: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 16:03:32.497 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:03:32.528 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:03:32.532 - [BeforeEach] [sig-node] Pods + STEP: Creating a kubernetes client 08/24/23 12:08:21.094 + Aug 24 12:08:21.094: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:08:21.097 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:21.131 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:21.136 + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should delete a collection of pods [Conformance] - test/e2e/common/node/pods.go:845 - STEP: Create set of pods 07/29/23 16:03:32.537 - Jul 29 16:03:32.551: INFO: created test-pod-1 - Jul 29 16:03:32.560: INFO: created test-pod-2 - Jul 29 16:03:32.576: INFO: created test-pod-3 - STEP: waiting for all 3 pods to be running 07/29/23 16:03:32.577 - Jul 29 16:03:32.577: INFO: Waiting up to 5m0s for all pods (need at least 3) in namespace 'pods-9917' to be running and ready - Jul 29 16:03:32.607: INFO: The status of Pod test-pod-1 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed - Jul 29 16:03:32.607: INFO: The status of Pod test-pod-2 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed - Jul 29 16:03:32.607: INFO: The status of Pod test-pod-3 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed - Jul 29 16:03:32.607: INFO: 0 / 3 pods in namespace 'pods-9917' are running and ready (0 seconds elapsed) - Jul 29 16:03:32.607: INFO: expected 0 pod replicas in namespace 'pods-9917', 0 are Running and Ready. 
- Jul 29 16:03:32.607: INFO: POD NODE PHASE GRACE CONDITIONS - Jul 29 16:03:32.607: INFO: test-pod-1 wetuj3nuajog-3 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC ContainersNotReady containers with unready status: [token-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC ContainersNotReady containers with unready status: [token-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC }] - Jul 29 16:03:32.608: INFO: test-pod-2 wetuj3nuajog-3 Pending [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC }] - Jul 29 16:03:32.608: INFO: test-pod-3 wetuj3nuajog-3 Pending [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 16:03:32 +0000 UTC }] - Jul 29 16:03:32.608: INFO: - Jul 29 16:03:34.626: INFO: 3 / 3 pods in namespace 'pods-9917' are running and ready (2 seconds elapsed) - Jul 29 16:03:34.626: INFO: expected 0 pod replicas in namespace 'pods-9917', 0 are Running and Ready. - STEP: waiting for all pods to be deleted 07/29/23 16:03:34.662 - Jul 29 16:03:34.670: INFO: Pod quantity 3 is different from expected quantity 0 - Jul 29 16:03:35.692: INFO: Pod quantity 3 is different from expected quantity 0 - Jul 29 16:03:36.681: INFO: Pod quantity 3 is different from expected quantity 0 - Jul 29 16:03:37.684: INFO: Pod quantity 1 is different from expected quantity 0 - [AfterEach] [sig-node] Pods + [It] works for multiple CRDs of same group and version but different kinds [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:357 + STEP: CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation 08/24/23 12:08:21.142 + Aug 24 12:08:21.145: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:24.743: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:03:38.677: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods + Aug 24 12:08:34.853: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "pods-9917" for this suite. 07/29/23 16:03:38.685 + STEP: Destroying namespace "crd-publish-openapi-8912" for this suite. 
08/24/23 12:08:34.878 << End Captured GinkgoWriter Output ------------------------------ -[sig-node] Variable Expansion - should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] - test/e2e/common/node/expansion.go:186 -[BeforeEach] [sig-node] Variable Expansion +SSSSSSS +------------------------------ +[sig-node] KubeletManagedEtcHosts + should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/kubelet_etc_hosts.go:63 +[BeforeEach] [sig-node] KubeletManagedEtcHosts set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:03:38.699 -Jul 29 16:03:38.700: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename var-expansion 07/29/23 16:03:38.706 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:03:38.732 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:03:38.736 -[BeforeEach] [sig-node] Variable Expansion +STEP: Creating a kubernetes client 08/24/23 12:08:34.895 +Aug 24 12:08:34.896: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts 08/24/23 12:08:34.897 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:34.933 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:34.94 +[BeforeEach] [sig-node] KubeletManagedEtcHosts test/e2e/framework/metrics/init/init.go:31 -[It] should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] - test/e2e/common/node/expansion.go:186 -Jul 29 16:03:38.755: INFO: Waiting up to 2m0s for pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d" in namespace "var-expansion-8802" to be "container 0 failed with reason CreateContainerConfigError" -Jul 29 16:03:38.760: INFO: Pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d": Phase="Pending", Reason="", readiness=false. Elapsed: 4.681323ms -Jul 29 16:03:40.768: INFO: Pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012339452s -Jul 29 16:03:40.768: INFO: Pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d" satisfied condition "container 0 failed with reason CreateContainerConfigError" -Jul 29 16:03:40.768: INFO: Deleting pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d" in namespace "var-expansion-8802" -Jul 29 16:03:40.793: INFO: Wait up to 5m0s for pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d" to be fully deleted -[AfterEach] [sig-node] Variable Expansion +[It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/kubelet_etc_hosts.go:63 +STEP: Setting up the test 08/24/23 12:08:34.945 +STEP: Creating hostNetwork=false pod 08/24/23 12:08:34.946 +Aug 24 12:08:34.973: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "e2e-kubelet-etc-hosts-6083" to be "running and ready" +Aug 24 12:08:34.996: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 23.457206ms +Aug 24 12:08:34.997: INFO: The phase of Pod test-pod is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:08:37.008: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.034678038s +Aug 24 12:08:37.008: INFO: The phase of Pod test-pod is Running (Ready = true) +Aug 24 12:08:37.008: INFO: Pod "test-pod" satisfied condition "running and ready" +STEP: Creating hostNetwork=true pod 08/24/23 12:08:37.013 +Aug 24 12:08:37.024: INFO: Waiting up to 5m0s for pod "test-host-network-pod" in namespace "e2e-kubelet-etc-hosts-6083" to be "running and ready" +Aug 24 12:08:37.033: INFO: Pod "test-host-network-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 8.993361ms +Aug 24 12:08:37.037: INFO: The phase of Pod test-host-network-pod is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:08:39.045: INFO: Pod "test-host-network-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.020861261s +Aug 24 12:08:39.045: INFO: The phase of Pod test-host-network-pod is Running (Ready = true) +Aug 24 12:08:39.045: INFO: Pod "test-host-network-pod" satisfied condition "running and ready" +STEP: Running the test 08/24/23 12:08:39.061 +STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false 08/24/23 12:08:39.061 +Aug 24 12:08:39.061: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:39.062: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:39.063: INFO: ExecWithOptions: Clientset creation +Aug 24 12:08:39.064: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-1&container=busybox-1&stderr=true&stdout=true) +Aug 24 12:08:39.196: INFO: Exec stderr: "" +Aug 24 12:08:39.196: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:39.196: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:39.198: INFO: ExecWithOptions: Clientset creation +Aug 24 12:08:39.199: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-1&container=busybox-1&stderr=true&stdout=true) +Aug 24 12:08:39.336: INFO: Exec stderr: "" +Aug 24 12:08:39.337: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:39.337: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:39.338: INFO: ExecWithOptions: Clientset creation +Aug 24 12:08:39.338: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-2&container=busybox-2&stderr=true&stdout=true) +Aug 24 12:08:39.462: INFO: Exec stderr: "" +Aug 24 12:08:39.462: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:39.462: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:39.465: INFO: ExecWithOptions: Clientset creation +Aug 24 12:08:39.465: INFO: ExecWithOptions: execute(POST 
https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-2&container=busybox-2&stderr=true&stdout=true) +Aug 24 12:08:39.579: INFO: Exec stderr: "" +STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount 08/24/23 12:08:39.579 +Aug 24 12:08:39.580: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:39.580: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:39.581: INFO: ExecWithOptions: Clientset creation +Aug 24 12:08:39.582: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-3&container=busybox-3&stderr=true&stdout=true) +Aug 24 12:08:39.726: INFO: Exec stderr: "" +Aug 24 12:08:39.726: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:39.726: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:39.729: INFO: ExecWithOptions: Clientset creation +Aug 24 12:08:39.729: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-3&container=busybox-3&stderr=true&stdout=true) +Aug 24 12:08:39.836: INFO: Exec stderr: "" +STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true 08/24/23 12:08:39.836 +Aug 24 12:08:39.837: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:39.837: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:39.839: INFO: ExecWithOptions: Clientset creation +Aug 24 12:08:39.839: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-1&container=busybox-1&stderr=true&stdout=true) +Aug 24 12:08:39.978: INFO: Exec stderr: "" +Aug 24 12:08:39.978: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:39.978: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:39.980: INFO: ExecWithOptions: Clientset creation +Aug 24 12:08:39.981: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-1&container=busybox-1&stderr=true&stdout=true) +Aug 24 12:08:40.116: INFO: Exec stderr: "" +Aug 24 12:08:40.116: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:40.116: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:40.118: INFO: 
ExecWithOptions: Clientset creation +Aug 24 12:08:40.119: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-2&container=busybox-2&stderr=true&stdout=true) +Aug 24 12:08:40.263: INFO: Exec stderr: "" +Aug 24 12:08:40.263: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:08:40.263: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:08:40.265: INFO: ExecWithOptions: Clientset creation +Aug 24 12:08:40.265: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-2&container=busybox-2&stderr=true&stdout=true) +Aug 24 12:08:40.408: INFO: Exec stderr: "" +[AfterEach] [sig-node] KubeletManagedEtcHosts test/e2e/framework/node/init/init.go:32 -Jul 29 16:03:42.805: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Variable Expansion +Aug 24 12:08:40.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts tear down framework | framework.go:193 -STEP: Destroying namespace "var-expansion-8802" for this suite. 07/29/23 16:03:42.814 +STEP: Destroying namespace "e2e-kubelet-etc-hosts-6083" for this suite. 
08/24/23 12:08:40.424 ------------------------------ -• [4.127 seconds] -[sig-node] Variable Expansion +• [SLOW TEST] [5.541 seconds] +[sig-node] KubeletManagedEtcHosts test/e2e/common/node/framework.go:23 - should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] - test/e2e/common/node/expansion.go:186 + should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/kubelet_etc_hosts.go:63 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Variable Expansion + [BeforeEach] [sig-node] KubeletManagedEtcHosts set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:03:38.699 - Jul 29 16:03:38.700: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename var-expansion 07/29/23 16:03:38.706 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:03:38.732 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:03:38.736 - [BeforeEach] [sig-node] Variable Expansion + STEP: Creating a kubernetes client 08/24/23 12:08:34.895 + Aug 24 12:08:34.896: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts 08/24/23 12:08:34.897 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:34.933 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:34.94 + [BeforeEach] [sig-node] KubeletManagedEtcHosts test/e2e/framework/metrics/init/init.go:31 - [It] should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] - test/e2e/common/node/expansion.go:186 - Jul 29 16:03:38.755: INFO: Waiting up to 2m0s for pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d" in namespace "var-expansion-8802" to be "container 0 failed with reason CreateContainerConfigError" - Jul 29 16:03:38.760: INFO: Pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d": Phase="Pending", Reason="", readiness=false. Elapsed: 4.681323ms - Jul 29 16:03:40.768: INFO: Pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012339452s - Jul 29 16:03:40.768: INFO: Pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d" satisfied condition "container 0 failed with reason CreateContainerConfigError" - Jul 29 16:03:40.768: INFO: Deleting pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d" in namespace "var-expansion-8802" - Jul 29 16:03:40.793: INFO: Wait up to 5m0s for pod "var-expansion-a0f9f932-24f9-4280-8404-c8e0f303910d" to be fully deleted - [AfterEach] [sig-node] Variable Expansion + [It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/kubelet_etc_hosts.go:63 + STEP: Setting up the test 08/24/23 12:08:34.945 + STEP: Creating hostNetwork=false pod 08/24/23 12:08:34.946 + Aug 24 12:08:34.973: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "e2e-kubelet-etc-hosts-6083" to be "running and ready" + Aug 24 12:08:34.996: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 23.457206ms + Aug 24 12:08:34.997: INFO: The phase of Pod test-pod is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:08:37.008: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.034678038s + Aug 24 12:08:37.008: INFO: The phase of Pod test-pod is Running (Ready = true) + Aug 24 12:08:37.008: INFO: Pod "test-pod" satisfied condition "running and ready" + STEP: Creating hostNetwork=true pod 08/24/23 12:08:37.013 + Aug 24 12:08:37.024: INFO: Waiting up to 5m0s for pod "test-host-network-pod" in namespace "e2e-kubelet-etc-hosts-6083" to be "running and ready" + Aug 24 12:08:37.033: INFO: Pod "test-host-network-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 8.993361ms + Aug 24 12:08:37.037: INFO: The phase of Pod test-host-network-pod is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:08:39.045: INFO: Pod "test-host-network-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.020861261s + Aug 24 12:08:39.045: INFO: The phase of Pod test-host-network-pod is Running (Ready = true) + Aug 24 12:08:39.045: INFO: Pod "test-host-network-pod" satisfied condition "running and ready" + STEP: Running the test 08/24/23 12:08:39.061 + STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false 08/24/23 12:08:39.061 + Aug 24 12:08:39.061: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:39.062: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:39.063: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:39.064: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-1&container=busybox-1&stderr=true&stdout=true) + Aug 24 12:08:39.196: INFO: Exec stderr: "" + Aug 24 12:08:39.196: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:39.196: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:39.198: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:39.199: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-1&container=busybox-1&stderr=true&stdout=true) + Aug 24 12:08:39.336: INFO: Exec stderr: "" + Aug 24 12:08:39.337: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:39.337: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:39.338: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:39.338: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-2&container=busybox-2&stderr=true&stdout=true) + Aug 24 12:08:39.462: INFO: Exec stderr: "" + Aug 24 12:08:39.462: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:39.462: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:39.465: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:39.465: INFO: ExecWithOptions: 
execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-2&container=busybox-2&stderr=true&stdout=true) + Aug 24 12:08:39.579: INFO: Exec stderr: "" + STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount 08/24/23 12:08:39.579 + Aug 24 12:08:39.580: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:39.580: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:39.581: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:39.582: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-3&container=busybox-3&stderr=true&stdout=true) + Aug 24 12:08:39.726: INFO: Exec stderr: "" + Aug 24 12:08:39.726: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:39.726: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:39.729: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:39.729: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-3&container=busybox-3&stderr=true&stdout=true) + Aug 24 12:08:39.836: INFO: Exec stderr: "" + STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true 08/24/23 12:08:39.836 + Aug 24 12:08:39.837: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:39.837: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:39.839: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:39.839: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-1&container=busybox-1&stderr=true&stdout=true) + Aug 24 12:08:39.978: INFO: Exec stderr: "" + Aug 24 12:08:39.978: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:39.978: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:39.980: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:39.981: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-1&container=busybox-1&stderr=true&stdout=true) + Aug 24 12:08:40.116: INFO: Exec stderr: "" + Aug 24 12:08:40.116: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:40.116: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 
+ Aug 24 12:08:40.118: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:40.119: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-2&container=busybox-2&stderr=true&stdout=true) + Aug 24 12:08:40.263: INFO: Exec stderr: "" + Aug 24 12:08:40.263: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-6083 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:08:40.263: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:08:40.265: INFO: ExecWithOptions: Clientset creation + Aug 24 12:08:40.265: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-6083/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-2&container=busybox-2&stderr=true&stdout=true) + Aug 24 12:08:40.408: INFO: Exec stderr: "" + [AfterEach] [sig-node] KubeletManagedEtcHosts test/e2e/framework/node/init/init.go:32 - Jul 29 16:03:42.805: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Variable Expansion + Aug 24 12:08:40.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts tear down framework | framework.go:193 - STEP: Destroying namespace "var-expansion-8802" for this suite. 07/29/23 16:03:42.814 + STEP: Destroying namespace "e2e-kubelet-etc-hosts-6083" for this suite. 
08/24/23 12:08:40.424 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] CronJob - should not schedule jobs when suspended [Slow] [Conformance] - test/e2e/apps/cronjob.go:96 -[BeforeEach] [sig-apps] CronJob +[sig-storage] Downward API volume + should update labels on modification [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:130 +[BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:03:42.835 -Jul 29 16:03:42.835: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename cronjob 07/29/23 16:03:42.837 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:03:42.874 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:03:42.88 -[BeforeEach] [sig-apps] CronJob +STEP: Creating a kubernetes client 08/24/23 12:08:40.45 +Aug 24 12:08:40.450: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:08:40.452 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:40.482 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:40.485 +[BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 -[It] should not schedule jobs when suspended [Slow] [Conformance] - test/e2e/apps/cronjob.go:96 -STEP: Creating a suspended cronjob 07/29/23 16:03:42.886 -STEP: Ensuring no jobs are scheduled 07/29/23 16:03:42.928 -STEP: Ensuring no job exists by listing jobs explicitly 07/29/23 16:08:42.942 -STEP: Removing cronjob 07/29/23 16:08:42.948 -[AfterEach] [sig-apps] CronJob +[BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 +[It] should update labels on modification [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:130 +STEP: Creating the pod 08/24/23 12:08:40.49 +Aug 24 12:08:40.504: INFO: Waiting up to 5m0s for pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657" in namespace "downward-api-7059" to be "running and ready" +Aug 24 12:08:40.510: INFO: Pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657": Phase="Pending", Reason="", readiness=false. Elapsed: 5.995477ms +Aug 24 12:08:40.511: INFO: The phase of Pod labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:08:42.518: INFO: Pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.013559936s +Aug 24 12:08:42.518: INFO: The phase of Pod labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657 is Running (Ready = true) +Aug 24 12:08:42.518: INFO: Pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657" satisfied condition "running and ready" +Aug 24 12:08:43.075: INFO: Successfully updated pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657" +[AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 -Jul 29 16:08:42.959: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] CronJob +Aug 24 12:08:45.115: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] CronJob +[DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] CronJob +[DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 -STEP: Destroying namespace "cronjob-106" for this suite. 07/29/23 16:08:42.972 +STEP: Destroying namespace "downward-api-7059" for this suite. 08/24/23 12:08:45.136 ------------------------------ -• [SLOW TEST] [300.148 seconds] -[sig-apps] CronJob -test/e2e/apps/framework.go:23 - should not schedule jobs when suspended [Slow] [Conformance] - test/e2e/apps/cronjob.go:96 +• [4.699 seconds] +[sig-storage] Downward API volume +test/e2e/common/storage/framework.go:23 + should update labels on modification [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:130 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] CronJob + [BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:03:42.835 - Jul 29 16:03:42.835: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename cronjob 07/29/23 16:03:42.837 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:03:42.874 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:03:42.88 - [BeforeEach] [sig-apps] CronJob + STEP: Creating a kubernetes client 08/24/23 12:08:40.45 + Aug 24 12:08:40.450: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 12:08:40.452 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:40.482 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:40.485 + [BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 - [It] should not schedule jobs when suspended [Slow] [Conformance] - test/e2e/apps/cronjob.go:96 - STEP: Creating a suspended cronjob 07/29/23 16:03:42.886 - STEP: Ensuring no jobs are scheduled 07/29/23 16:03:42.928 - STEP: Ensuring no job exists by listing jobs explicitly 07/29/23 16:08:42.942 - STEP: Removing cronjob 07/29/23 16:08:42.948 - [AfterEach] [sig-apps] CronJob + [BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 + [It] should update labels on modification [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:130 + STEP: Creating the pod 08/24/23 12:08:40.49 + Aug 24 12:08:40.504: INFO: Waiting up to 5m0s for pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657" in namespace "downward-api-7059" to be "running and ready" + Aug 24 12:08:40.510: 
INFO: Pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657": Phase="Pending", Reason="", readiness=false. Elapsed: 5.995477ms + Aug 24 12:08:40.511: INFO: The phase of Pod labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:08:42.518: INFO: Pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657": Phase="Running", Reason="", readiness=true. Elapsed: 2.013559936s + Aug 24 12:08:42.518: INFO: The phase of Pod labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657 is Running (Ready = true) + Aug 24 12:08:42.518: INFO: Pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657" satisfied condition "running and ready" + Aug 24 12:08:43.075: INFO: Successfully updated pod "labelsupdate0558dfc0-d665-48e8-b8e1-da9e4894e657" + [AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 - Jul 29 16:08:42.959: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] CronJob + Aug 24 12:08:45.115: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] CronJob + [DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] CronJob + [DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 - STEP: Destroying namespace "cronjob-106" for this suite. 07/29/23 16:08:42.972 + STEP: Destroying namespace "downward-api-7059" for this suite. 08/24/23 12:08:45.136 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] DNS - should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] - test/e2e/network/dns.go:193 -[BeforeEach] [sig-network] DNS +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a replica set. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:448 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:08:42.985 -Jul 29 16:08:42.985: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename dns 07/29/23 16:08:42.988 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:08:43.016 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:08:43.021 -[BeforeEach] [sig-network] DNS - test/e2e/framework/metrics/init/init.go:31 -[It] should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] - test/e2e/network/dns.go:193 -STEP: Creating a test headless service 07/29/23 16:08:43.027 -STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-3074 A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-3074;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-3074 A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-3074;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-3074.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-3074.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-3074.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-3074.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-3074.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-3074.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-3074.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-3074.svc;check="$$(dig +notcp +noall +answer +search 32.37.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.37.32_udp@PTR;check="$$(dig +tcp +noall +answer +search 32.37.233.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.233.37.32_tcp@PTR;sleep 1; done - 07/29/23 16:08:43.058 -STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-3074 A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-3074;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-3074 A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-3074;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-3074.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-3074.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-3074.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-3074.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-3074.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-3074.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-3074.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-3074.svc;check="$$(dig +notcp +noall +answer +search 32.37.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.37.32_udp@PTR;check="$$(dig +tcp +noall +answer +search 32.37.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.37.32_tcp@PTR;sleep 1; done - 07/29/23 16:08:43.059 -STEP: creating a pod to probe DNS 07/29/23 16:08:43.059 -STEP: submitting the pod to kubernetes 07/29/23 16:08:43.059 -Jul 29 16:08:43.083: INFO: Waiting up to 15m0s for pod "dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b" in namespace "dns-3074" to be "running" -Jul 29 16:08:43.094: INFO: Pod "dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b": Phase="Pending", Reason="", readiness=false. Elapsed: 10.922365ms -Jul 29 16:08:45.104: INFO: Pod "dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.021125238s -Jul 29 16:08:45.104: INFO: Pod "dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b" satisfied condition "running" -STEP: retrieving the pod 07/29/23 16:08:45.104 -STEP: looking for the results for each expected name from probers 07/29/23 16:08:45.112 -Jul 29 16:08:45.125: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.136: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.144: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.150: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.158: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.165: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.172: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.180: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.213: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.221: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.229: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.236: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.244: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 
16:08:45.255: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.271: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:45.310: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc wheezy_udp@_http._tcp.dns-test-service.dns-3074.svc wheezy_tcp@_http._tcp.dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc jessie_tcp@_http._tcp.dns-test-service.dns-3074.svc] - -Jul 29 16:08:50.326: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.333: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.338: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.343: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.350: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.354: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.398: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.407: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.412: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.420: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.431: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.438: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:50.490: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - -Jul 29 16:08:55.335: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.346: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.354: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.363: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.370: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.377: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.420: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.424: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.430: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.437: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.441: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.447: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:08:55.488: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - -Jul 29 16:09:00.324: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.334: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.340: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.347: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.355: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.362: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.407: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.413: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.421: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.428: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.436: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.442: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:00.490: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - -Jul 29 16:09:05.326: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.339: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.346: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.354: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.362: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.371: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.425: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.432: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.438: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.448: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.460: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.467: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:05.510: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - -Jul 29 16:09:10.323: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.333: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.340: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.352: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.360: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.369: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.440: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.448: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.457: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.465: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.471: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.480: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) -Jul 29 16:09:10.534: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - -Jul 29 16:09:15.522: INFO: DNS probes using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b succeeded - -STEP: deleting the pod 07/29/23 16:09:15.522 -STEP: deleting the test service 07/29/23 16:09:15.629 -STEP: deleting the test headless service 07/29/23 16:09:15.782 -[AfterEach] [sig-network] DNS +STEP: Creating a kubernetes client 08/24/23 12:08:45.155 +Aug 24 12:08:45.155: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:08:45.156 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:45.186 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:45.191 +[BeforeEach] [sig-api-machinery] ResourceQuota + test/e2e/framework/metrics/init/init.go:31 +[It] should create a ResourceQuota and capture the life of a replica set. [Conformance] + test/e2e/apimachinery/resource_quota.go:448 +STEP: Counting existing ResourceQuota 08/24/23 12:08:45.197 +STEP: Creating a ResourceQuota 08/24/23 12:08:50.204 +STEP: Ensuring resource quota status is calculated 08/24/23 12:08:50.215 +STEP: Creating a ReplicaSet 08/24/23 12:08:52.224 +STEP: Ensuring resource quota status captures replicaset creation 08/24/23 12:08:52.25 +STEP: Deleting a ReplicaSet 08/24/23 12:08:54.259 +STEP: Ensuring resource quota status released usage 08/24/23 12:08:54.271 +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:09:15.904: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] DNS +Aug 24 12:08:56.280: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "dns-3074" for this suite. 07/29/23 16:09:15.917 +STEP: Destroying namespace "resourcequota-3263" for this suite. 
08/24/23 12:08:56.289 ------------------------------ -• [SLOW TEST] [32.944 seconds] -[sig-network] DNS -test/e2e/network/common/framework.go:23 - should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] - test/e2e/network/dns.go:193 +• [SLOW TEST] [11.149 seconds] +[sig-api-machinery] ResourceQuota +test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a replica set. [Conformance] + test/e2e/apimachinery/resource_quota.go:448 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] DNS + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:08:42.985 - Jul 29 16:08:42.985: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename dns 07/29/23 16:08:42.988 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:08:43.016 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:08:43.021 - [BeforeEach] [sig-network] DNS + STEP: Creating a kubernetes client 08/24/23 12:08:45.155 + Aug 24 12:08:45.155: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:08:45.156 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:45.186 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:45.191 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [It] should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] - test/e2e/network/dns.go:193 - STEP: Creating a test headless service 07/29/23 16:08:43.027 - STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-3074 A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-3074;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-3074 A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-3074;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-3074.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-3074.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-3074.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-3074.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-3074.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-3074.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-3074.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-3074.svc;check="$$(dig +notcp +noall +answer +search 32.37.233.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.233.37.32_udp@PTR;check="$$(dig +tcp +noall +answer +search 32.37.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.37.32_tcp@PTR;sleep 1; done - 07/29/23 16:08:43.058 - STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-3074 A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-3074;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-3074 A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-3074;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-3074.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-3074.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-3074.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-3074.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-3074.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-3074.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-3074.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-3074.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-3074.svc;check="$$(dig +notcp +noall +answer +search 32.37.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.37.32_udp@PTR;check="$$(dig +tcp +noall +answer +search 32.37.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.37.32_tcp@PTR;sleep 1; done - 07/29/23 16:08:43.059 - STEP: creating a pod to probe DNS 07/29/23 16:08:43.059 - STEP: submitting the pod to kubernetes 07/29/23 16:08:43.059 - Jul 29 16:08:43.083: INFO: Waiting up to 15m0s for pod "dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b" in namespace "dns-3074" to be "running" - Jul 29 16:08:43.094: INFO: Pod "dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b": Phase="Pending", Reason="", readiness=false. Elapsed: 10.922365ms - Jul 29 16:08:45.104: INFO: Pod "dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.021125238s - Jul 29 16:08:45.104: INFO: Pod "dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b" satisfied condition "running" - STEP: retrieving the pod 07/29/23 16:08:45.104 - STEP: looking for the results for each expected name from probers 07/29/23 16:08:45.112 - Jul 29 16:08:45.125: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.136: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.144: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.150: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.158: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.165: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.172: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.180: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.213: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.221: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.229: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.236: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.244: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) 
- Jul 29 16:08:45.255: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.271: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:45.310: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc wheezy_udp@_http._tcp.dns-test-service.dns-3074.svc wheezy_tcp@_http._tcp.dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc jessie_tcp@_http._tcp.dns-test-service.dns-3074.svc] - - Jul 29 16:08:50.326: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.333: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.338: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.343: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.350: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.354: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.398: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.407: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.412: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.420: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.431: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.438: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:50.490: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - - Jul 29 16:08:55.335: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.346: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.354: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.363: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.370: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.377: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.420: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.424: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.430: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.437: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.441: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.447: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:08:55.488: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - - Jul 29 16:09:00.324: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.334: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.340: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.347: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.355: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.362: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.407: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.413: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.421: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.428: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.436: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.442: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:00.490: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - - Jul 29 16:09:05.326: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.339: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.346: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.354: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.362: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.371: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.425: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.432: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.438: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.448: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.460: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.467: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:05.510: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - - Jul 29 16:09:10.323: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.333: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.340: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.352: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.360: INFO: Unable to read wheezy_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.369: INFO: Unable to read wheezy_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.440: INFO: Unable to read jessie_udp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.448: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.457: INFO: Unable to read jessie_udp@dns-test-service.dns-3074 from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.465: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074 from pod 
dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.471: INFO: Unable to read jessie_udp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.480: INFO: Unable to read jessie_tcp@dns-test-service.dns-3074.svc from pod dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b: the server could not find the requested resource (get pods dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b) - Jul 29 16:09:10.534: INFO: Lookups using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-3074 wheezy_tcp@dns-test-service.dns-3074 wheezy_udp@dns-test-service.dns-3074.svc wheezy_tcp@dns-test-service.dns-3074.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-3074 jessie_tcp@dns-test-service.dns-3074 jessie_udp@dns-test-service.dns-3074.svc jessie_tcp@dns-test-service.dns-3074.svc] - - Jul 29 16:09:15.522: INFO: DNS probes using dns-3074/dns-test-c54ba03c-893e-4431-838b-5cf2f06d168b succeeded - - STEP: deleting the pod 07/29/23 16:09:15.522 - STEP: deleting the test service 07/29/23 16:09:15.629 - STEP: deleting the test headless service 07/29/23 16:09:15.782 - [AfterEach] [sig-network] DNS + [It] should create a ResourceQuota and capture the life of a replica set. [Conformance] + test/e2e/apimachinery/resource_quota.go:448 + STEP: Counting existing ResourceQuota 08/24/23 12:08:45.197 + STEP: Creating a ResourceQuota 08/24/23 12:08:50.204 + STEP: Ensuring resource quota status is calculated 08/24/23 12:08:50.215 + STEP: Creating a ReplicaSet 08/24/23 12:08:52.224 + STEP: Ensuring resource quota status captures replicaset creation 08/24/23 12:08:52.25 + STEP: Deleting a ReplicaSet 08/24/23 12:08:54.259 + STEP: Ensuring resource quota status released usage 08/24/23 12:08:54.271 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:09:15.904: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] DNS + Aug 24 12:08:56.280: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] DNS + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] DNS + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "dns-3074" for this suite. 07/29/23 16:09:15.917 + STEP: Destroying namespace "resourcequota-3263" for this suite. 
08/24/23 12:08:56.289 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS +SSSSSSSSSSS ------------------------------ -[sig-auth] ServiceAccounts - should guarantee kube-root-ca.crt exist in any namespace [Conformance] - test/e2e/auth/service_accounts.go:742 -[BeforeEach] [sig-auth] ServiceAccounts +[sig-api-machinery] ResourceQuota + should manage the lifecycle of a ResourceQuota [Conformance] + test/e2e/apimachinery/resource_quota.go:943 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:09:15.943 -Jul 29 16:09:15.943: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename svcaccounts 07/29/23 16:09:15.953 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:16.01 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:16.019 -[BeforeEach] [sig-auth] ServiceAccounts +STEP: Creating a kubernetes client 08/24/23 12:08:56.311 +Aug 24 12:08:56.312: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:08:56.313 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:56.344 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:56.349 +[BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[It] should guarantee kube-root-ca.crt exist in any namespace [Conformance] - test/e2e/auth/service_accounts.go:742 -Jul 29 16:09:16.034: INFO: Got root ca configmap in namespace "svcaccounts-7131" -Jul 29 16:09:16.047: INFO: Deleted root ca configmap in namespace "svcaccounts-7131" -STEP: waiting for a new root ca configmap created 07/29/23 16:09:16.548 -Jul 29 16:09:16.556: INFO: Recreated root ca configmap in namespace "svcaccounts-7131" -Jul 29 16:09:16.571: INFO: Updated root ca configmap in namespace "svcaccounts-7131" -STEP: waiting for the root ca configmap reconciled 07/29/23 16:09:17.072 -Jul 29 16:09:17.080: INFO: Reconciled root ca configmap in namespace "svcaccounts-7131" -[AfterEach] [sig-auth] ServiceAccounts +[It] should manage the lifecycle of a ResourceQuota [Conformance] + test/e2e/apimachinery/resource_quota.go:943 +STEP: Creating a ResourceQuota 08/24/23 12:08:56.355 +STEP: Getting a ResourceQuota 08/24/23 12:08:56.364 +STEP: Listing all ResourceQuotas with LabelSelector 08/24/23 12:08:56.371 +STEP: Patching the ResourceQuota 08/24/23 12:08:56.379 +STEP: Deleting a Collection of ResourceQuotas 08/24/23 12:08:56.394 +STEP: Verifying the deleted ResourceQuota 08/24/23 12:08:56.412 +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:09:17.080: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +Aug 24 12:08:56.417: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "svcaccounts-7131" for this suite. 07/29/23 16:09:17.093 +STEP: Destroying namespace "resourcequota-4148" for this suite. 
08/24/23 12:08:56.425 ------------------------------ -• [1.163 seconds] -[sig-auth] ServiceAccounts -test/e2e/auth/framework.go:23 - should guarantee kube-root-ca.crt exist in any namespace [Conformance] - test/e2e/auth/service_accounts.go:742 +• [0.127 seconds] +[sig-api-machinery] ResourceQuota +test/e2e/apimachinery/framework.go:23 + should manage the lifecycle of a ResourceQuota [Conformance] + test/e2e/apimachinery/resource_quota.go:943 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-auth] ServiceAccounts + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:09:15.943 - Jul 29 16:09:15.943: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename svcaccounts 07/29/23 16:09:15.953 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:16.01 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:16.019 - [BeforeEach] [sig-auth] ServiceAccounts + STEP: Creating a kubernetes client 08/24/23 12:08:56.311 + Aug 24 12:08:56.312: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:08:56.313 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:56.344 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:56.349 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [It] should guarantee kube-root-ca.crt exist in any namespace [Conformance] - test/e2e/auth/service_accounts.go:742 - Jul 29 16:09:16.034: INFO: Got root ca configmap in namespace "svcaccounts-7131" - Jul 29 16:09:16.047: INFO: Deleted root ca configmap in namespace "svcaccounts-7131" - STEP: waiting for a new root ca configmap created 07/29/23 16:09:16.548 - Jul 29 16:09:16.556: INFO: Recreated root ca configmap in namespace "svcaccounts-7131" - Jul 29 16:09:16.571: INFO: Updated root ca configmap in namespace "svcaccounts-7131" - STEP: waiting for the root ca configmap reconciled 07/29/23 16:09:17.072 - Jul 29 16:09:17.080: INFO: Reconciled root ca configmap in namespace "svcaccounts-7131" - [AfterEach] [sig-auth] ServiceAccounts + [It] should manage the lifecycle of a ResourceQuota [Conformance] + test/e2e/apimachinery/resource_quota.go:943 + STEP: Creating a ResourceQuota 08/24/23 12:08:56.355 + STEP: Getting a ResourceQuota 08/24/23 12:08:56.364 + STEP: Listing all ResourceQuotas with LabelSelector 08/24/23 12:08:56.371 + STEP: Patching the ResourceQuota 08/24/23 12:08:56.379 + STEP: Deleting a Collection of ResourceQuotas 08/24/23 12:08:56.394 + STEP: Verifying the deleted ResourceQuota 08/24/23 12:08:56.412 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:09:17.080: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + Aug 24 12:08:56.417: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "svcaccounts-7131" for this suite. 
07/29/23 16:09:17.093 + STEP: Destroying namespace "resourcequota-4148" for this suite. 08/24/23 12:08:56.425 << End Captured GinkgoWriter Output ------------------------------ -SS +SSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for CRD preserving unknown fields at the schema root [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:194 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces + should list and delete a collection of PodDisruptionBudgets [Conformance] + test/e2e/apps/disruption.go:87 +[BeforeEach] [sig-apps] DisruptionController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:09:17.106 -Jul 29 16:09:17.106: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:09:17.108 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:17.146 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:17.152 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:08:56.44 +Aug 24 12:08:56.441: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename disruption 08/24/23 12:08:56.443 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:56.486 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:56.491 +[BeforeEach] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:31 -[It] works for CRD preserving unknown fields at the schema root [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:194 -Jul 29 16:09:17.160: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 07/29/23 16:09:20.969 -Jul 29 16:09:20.970: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 --namespace=crd-publish-openapi-1257 create -f -' -Jul 29 16:09:22.716: INFO: stderr: "" -Jul 29 16:09:22.717: INFO: stdout: "e2e-test-crd-publish-openapi-5957-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" -Jul 29 16:09:22.717: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 --namespace=crd-publish-openapi-1257 delete e2e-test-crd-publish-openapi-5957-crds test-cr' -Jul 29 16:09:22.930: INFO: stderr: "" -Jul 29 16:09:22.930: INFO: stdout: "e2e-test-crd-publish-openapi-5957-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" -Jul 29 16:09:22.931: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 --namespace=crd-publish-openapi-1257 apply -f -' -Jul 29 16:09:24.064: INFO: stderr: "" -Jul 29 16:09:24.064: INFO: stdout: "e2e-test-crd-publish-openapi-5957-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" -Jul 29 16:09:24.065: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 --namespace=crd-publish-openapi-1257 delete e2e-test-crd-publish-openapi-5957-crds test-cr' -Jul 29 16:09:24.200: INFO: stderr: "" -Jul 29 16:09:24.200: INFO: stdout: 
"e2e-test-crd-publish-openapi-5957-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" -STEP: kubectl explain works to explain CR 07/29/23 16:09:24.2 -Jul 29 16:09:24.200: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 explain e2e-test-crd-publish-openapi-5957-crds' -Jul 29 16:09:24.666: INFO: stderr: "" -Jul 29 16:09:24.666: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-5957-crd\nVERSION: crd-publish-openapi-test-unknown-at-root.example.com/v1\n\nDESCRIPTION:\n \n" -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-apps] DisruptionController + test/e2e/apps/disruption.go:72 +[BeforeEach] Listing PodDisruptionBudgets for all namespaces + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:08:56.495 +Aug 24 12:08:56.495: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename disruption-2 08/24/23 12:08:56.496 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:56.53 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:56.533 +[BeforeEach] Listing PodDisruptionBudgets for all namespaces + test/e2e/framework/metrics/init/init.go:31 +[It] should list and delete a collection of PodDisruptionBudgets [Conformance] + test/e2e/apps/disruption.go:87 +STEP: Waiting for the pdb to be processed 08/24/23 12:08:56.545 +STEP: Waiting for the pdb to be processed 08/24/23 12:08:58.563 +STEP: Waiting for the pdb to be processed 08/24/23 12:09:00.587 +STEP: listing a collection of PDBs across all namespaces 08/24/23 12:09:02.604 +STEP: listing a collection of PDBs in namespace disruption-402 08/24/23 12:09:02.61 +STEP: deleting a collection of PDBs 08/24/23 12:09:02.616 +STEP: Waiting for the PDB collection to be deleted 08/24/23 12:09:02.638 +[AfterEach] Listing PodDisruptionBudgets for all namespaces test/e2e/framework/node/init/init.go:32 -Jul 29 16:09:27.110: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +Aug 24 12:09:02.646: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-apps] DisruptionController + test/e2e/framework/node/init/init.go:32 +Aug 24 12:09:02.659: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces + tear down framework | framework.go:193 +STEP: Destroying namespace "disruption-2-9370" for this suite. 08/24/23 12:09:02.668 +[DeferCleanup (Each)] [sig-apps] DisruptionController + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-apps] DisruptionController + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-apps] DisruptionController tear down framework | framework.go:193 -STEP: Destroying namespace "crd-publish-openapi-1257" for this suite. 07/29/23 16:09:27.13 +STEP: Destroying namespace "disruption-402" for this suite. 
08/24/23 12:09:02.681 ------------------------------ -• [SLOW TEST] [10.036 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - works for CRD preserving unknown fields at the schema root [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:194 +• [SLOW TEST] [6.253 seconds] +[sig-apps] DisruptionController +test/e2e/apps/framework.go:23 + Listing PodDisruptionBudgets for all namespaces + test/e2e/apps/disruption.go:78 + should list and delete a collection of PodDisruptionBudgets [Conformance] + test/e2e/apps/disruption.go:87 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] [sig-apps] DisruptionController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:09:17.106 - Jul 29 16:09:17.106: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:09:17.108 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:17.146 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:17.152 - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:08:56.44 + Aug 24 12:08:56.441: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename disruption 08/24/23 12:08:56.443 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:56.486 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:56.491 + [BeforeEach] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:31 - [It] works for CRD preserving unknown fields at the schema root [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:194 - Jul 29 16:09:17.160: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 07/29/23 16:09:20.969 - Jul 29 16:09:20.970: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 --namespace=crd-publish-openapi-1257 create -f -' - Jul 29 16:09:22.716: INFO: stderr: "" - Jul 29 16:09:22.717: INFO: stdout: "e2e-test-crd-publish-openapi-5957-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" - Jul 29 16:09:22.717: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 --namespace=crd-publish-openapi-1257 delete e2e-test-crd-publish-openapi-5957-crds test-cr' - Jul 29 16:09:22.930: INFO: stderr: "" - Jul 29 16:09:22.930: INFO: stdout: "e2e-test-crd-publish-openapi-5957-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" - Jul 29 16:09:22.931: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 --namespace=crd-publish-openapi-1257 apply -f -' - Jul 29 16:09:24.064: INFO: stderr: "" - Jul 29 16:09:24.064: INFO: stdout: "e2e-test-crd-publish-openapi-5957-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" - Jul 29 16:09:24.065: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 --namespace=crd-publish-openapi-1257 delete e2e-test-crd-publish-openapi-5957-crds test-cr' - Jul 29 
16:09:24.200: INFO: stderr: "" - Jul 29 16:09:24.200: INFO: stdout: "e2e-test-crd-publish-openapi-5957-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" - STEP: kubectl explain works to explain CR 07/29/23 16:09:24.2 - Jul 29 16:09:24.200: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=crd-publish-openapi-1257 explain e2e-test-crd-publish-openapi-5957-crds' - Jul 29 16:09:24.666: INFO: stderr: "" - Jul 29 16:09:24.666: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-5957-crd\nVERSION: crd-publish-openapi-test-unknown-at-root.example.com/v1\n\nDESCRIPTION:\n \n" - [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] [sig-apps] DisruptionController + test/e2e/apps/disruption.go:72 + [BeforeEach] Listing PodDisruptionBudgets for all namespaces + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:08:56.495 + Aug 24 12:08:56.495: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename disruption-2 08/24/23 12:08:56.496 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:08:56.53 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:08:56.533 + [BeforeEach] Listing PodDisruptionBudgets for all namespaces + test/e2e/framework/metrics/init/init.go:31 + [It] should list and delete a collection of PodDisruptionBudgets [Conformance] + test/e2e/apps/disruption.go:87 + STEP: Waiting for the pdb to be processed 08/24/23 12:08:56.545 + STEP: Waiting for the pdb to be processed 08/24/23 12:08:58.563 + STEP: Waiting for the pdb to be processed 08/24/23 12:09:00.587 + STEP: listing a collection of PDBs across all namespaces 08/24/23 12:09:02.604 + STEP: listing a collection of PDBs in namespace disruption-402 08/24/23 12:09:02.61 + STEP: deleting a collection of PDBs 08/24/23 12:09:02.616 + STEP: Waiting for the PDB collection to be deleted 08/24/23 12:09:02.638 + [AfterEach] Listing PodDisruptionBudgets for all namespaces test/e2e/framework/node/init/init.go:32 - Jul 29 16:09:27.110: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + Aug 24 12:09:02.646: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-apps] DisruptionController + test/e2e/framework/node/init/init.go:32 + Aug 24 12:09:02.659: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] Listing PodDisruptionBudgets for all namespaces + tear down framework | framework.go:193 + STEP: Destroying namespace "disruption-2-9370" for this suite. 08/24/23 12:09:02.668 + [DeferCleanup (Each)] [sig-apps] DisruptionController + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-apps] DisruptionController + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-apps] DisruptionController tear down framework | framework.go:193 - STEP: Destroying namespace "crd-publish-openapi-1257" for this suite. 
07/29/23 16:09:27.13 + STEP: Destroying namespace "disruption-402" for this suite. 08/24/23 12:09:02.681 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS +SSSSS ------------------------------ -[sig-node] Secrets - should patch a secret [Conformance] - test/e2e/common/node/secrets.go:154 -[BeforeEach] [sig-node] Secrets +[sig-apps] ReplicationController + should test the lifecycle of a ReplicationController [Conformance] + test/e2e/apps/rc.go:110 +[BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:09:27.148 -Jul 29 16:09:27.148: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename secrets 07/29/23 16:09:27.151 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:27.185 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:27.19 -[BeforeEach] [sig-node] Secrets +STEP: Creating a kubernetes client 08/24/23 12:09:02.699 +Aug 24 12:09:02.699: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replication-controller 08/24/23 12:09:02.702 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:09:02.733 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:09:02.737 +[BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 -[It] should patch a secret [Conformance] - test/e2e/common/node/secrets.go:154 -STEP: creating a secret 07/29/23 16:09:27.197 -STEP: listing secrets in all namespaces to ensure that there are more than zero 07/29/23 16:09:27.211 -STEP: patching the secret 07/29/23 16:09:27.221 -STEP: deleting the secret using a LabelSelector 07/29/23 16:09:27.242 -STEP: listing secrets in all namespaces, searching for label name and value in patch 07/29/23 16:09:27.261 -[AfterEach] [sig-node] Secrets +[BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 +[It] should test the lifecycle of a ReplicationController [Conformance] + test/e2e/apps/rc.go:110 +STEP: creating a ReplicationController 08/24/23 12:09:02.749 +STEP: waiting for RC to be added 08/24/23 12:09:02.759 +STEP: waiting for available Replicas 08/24/23 12:09:02.759 +STEP: patching ReplicationController 08/24/23 12:09:04.484 +STEP: waiting for RC to be modified 08/24/23 12:09:04.5 +STEP: patching ReplicationController status 08/24/23 12:09:04.5 +STEP: waiting for RC to be modified 08/24/23 12:09:04.515 +STEP: waiting for available Replicas 08/24/23 12:09:04.515 +STEP: fetching ReplicationController status 08/24/23 12:09:04.53 +STEP: patching ReplicationController scale 08/24/23 12:09:04.54 +STEP: waiting for RC to be modified 08/24/23 12:09:04.555 +STEP: waiting for ReplicationController's scale to be the max amount 08/24/23 12:09:04.556 +STEP: fetching ReplicationController; ensuring that it's patched 08/24/23 12:09:06.279 +STEP: updating ReplicationController status 08/24/23 12:09:06.287 +STEP: waiting for RC to be modified 08/24/23 12:09:06.296 +STEP: listing all ReplicationControllers 08/24/23 12:09:06.298 +STEP: checking that ReplicationController has expected values 08/24/23 12:09:06.307 +STEP: deleting ReplicationControllers by collection 08/24/23 12:09:06.308 +STEP: waiting for ReplicationController to have a DELETED watchEvent 08/24/23 12:09:06.322 +[AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 -Jul 29 16:09:27.268: 
INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Secrets +Aug 24 12:09:06.421: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Secrets +[DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Secrets +[DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-5426" for this suite. 07/29/23 16:09:27.279 +STEP: Destroying namespace "replication-controller-6548" for this suite. 08/24/23 12:09:06.43 ------------------------------ -• [0.145 seconds] -[sig-node] Secrets -test/e2e/common/node/framework.go:23 - should patch a secret [Conformance] - test/e2e/common/node/secrets.go:154 +• [3.743 seconds] +[sig-apps] ReplicationController +test/e2e/apps/framework.go:23 + should test the lifecycle of a ReplicationController [Conformance] + test/e2e/apps/rc.go:110 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Secrets + [BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:09:27.148 - Jul 29 16:09:27.148: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 16:09:27.151 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:27.185 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:27.19 - [BeforeEach] [sig-node] Secrets + STEP: Creating a kubernetes client 08/24/23 12:09:02.699 + Aug 24 12:09:02.699: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replication-controller 08/24/23 12:09:02.702 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:09:02.733 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:09:02.737 + [BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 - [It] should patch a secret [Conformance] - test/e2e/common/node/secrets.go:154 - STEP: creating a secret 07/29/23 16:09:27.197 - STEP: listing secrets in all namespaces to ensure that there are more than zero 07/29/23 16:09:27.211 - STEP: patching the secret 07/29/23 16:09:27.221 - STEP: deleting the secret using a LabelSelector 07/29/23 16:09:27.242 - STEP: listing secrets in all namespaces, searching for label name and value in patch 07/29/23 16:09:27.261 - [AfterEach] [sig-node] Secrets + [BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 + [It] should test the lifecycle of a ReplicationController [Conformance] + test/e2e/apps/rc.go:110 + STEP: creating a ReplicationController 08/24/23 12:09:02.749 + STEP: waiting for RC to be added 08/24/23 12:09:02.759 + STEP: waiting for available Replicas 08/24/23 12:09:02.759 + STEP: patching ReplicationController 08/24/23 12:09:04.484 + STEP: waiting for RC to be modified 08/24/23 12:09:04.5 + STEP: patching ReplicationController status 08/24/23 12:09:04.5 + STEP: waiting for RC to be modified 08/24/23 12:09:04.515 + STEP: waiting for available Replicas 08/24/23 12:09:04.515 + STEP: fetching ReplicationController status 08/24/23 12:09:04.53 + STEP: patching ReplicationController scale 08/24/23 12:09:04.54 + STEP: waiting for RC to be modified 08/24/23 12:09:04.555 + STEP: waiting for 
ReplicationController's scale to be the max amount 08/24/23 12:09:04.556 + STEP: fetching ReplicationController; ensuring that it's patched 08/24/23 12:09:06.279 + STEP: updating ReplicationController status 08/24/23 12:09:06.287 + STEP: waiting for RC to be modified 08/24/23 12:09:06.296 + STEP: listing all ReplicationControllers 08/24/23 12:09:06.298 + STEP: checking that ReplicationController has expected values 08/24/23 12:09:06.307 + STEP: deleting ReplicationControllers by collection 08/24/23 12:09:06.308 + STEP: waiting for ReplicationController to have a DELETED watchEvent 08/24/23 12:09:06.322 + [AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 - Jul 29 16:09:27.268: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Secrets + Aug 24 12:09:06.421: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Secrets + [DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Secrets + [DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-5426" for this suite. 07/29/23 16:09:27.279 + STEP: Destroying namespace "replication-controller-6548" for this suite. 08/24/23 12:09:06.43 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSS +SSS ------------------------------ -[sig-storage] ConfigMap - binary data should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:175 -[BeforeEach] [sig-storage] ConfigMap +[sig-node] Pods + should delete a collection of pods [Conformance] + test/e2e/common/node/pods.go:845 +[BeforeEach] [sig-node] Pods set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:09:27.296 -Jul 29 16:09:27.297: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:09:27.299 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:27.348 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:27.357 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 12:09:06.443 +Aug 24 12:09:06.443: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 12:09:06.445 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:09:06.476 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:09:06.482 +[BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 -[It] binary data should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:175 -STEP: Creating configMap with name configmap-test-upd-a8fb1350-8980-4279-8b2e-b0d989e905b4 07/29/23 16:09:27.381 -STEP: Creating the pod 07/29/23 16:09:27.392 -Jul 29 16:09:27.409: INFO: Waiting up to 5m0s for pod "pod-configmaps-da98156c-ac9c-470e-a626-1f1320125ace" in namespace "configmap-810" to be "running" -Jul 29 16:09:27.418: INFO: Pod "pod-configmaps-da98156c-ac9c-470e-a626-1f1320125ace": Phase="Pending", Reason="", readiness=false. Elapsed: 8.076739ms -Jul 29 16:09:29.434: INFO: Pod "pod-configmaps-da98156c-ac9c-470e-a626-1f1320125ace": Phase="Running", Reason="", readiness=false. 
Elapsed: 2.024386151s -Jul 29 16:09:29.435: INFO: Pod "pod-configmaps-da98156c-ac9c-470e-a626-1f1320125ace" satisfied condition "running" -STEP: Waiting for pod with text data 07/29/23 16:09:29.435 -STEP: Waiting for pod with binary data 07/29/23 16:09:29.471 -[AfterEach] [sig-storage] ConfigMap +[BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 +[It] should delete a collection of pods [Conformance] + test/e2e/common/node/pods.go:845 +STEP: Create set of pods 08/24/23 12:09:06.487 +Aug 24 12:09:06.507: INFO: created test-pod-1 +Aug 24 12:09:06.520: INFO: created test-pod-2 +Aug 24 12:09:06.543: INFO: created test-pod-3 +STEP: waiting for all 3 pods to be running 08/24/23 12:09:06.543 +Aug 24 12:09:06.544: INFO: Waiting up to 5m0s for all pods (need at least 3) in namespace 'pods-198' to be running and ready +Aug 24 12:09:06.569: INFO: The status of Pod test-pod-1 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed +Aug 24 12:09:06.569: INFO: The status of Pod test-pod-2 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed +Aug 24 12:09:06.569: INFO: The status of Pod test-pod-3 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed +Aug 24 12:09:06.569: INFO: 0 / 3 pods in namespace 'pods-198' are running and ready (0 seconds elapsed) +Aug 24 12:09:06.569: INFO: expected 0 pod replicas in namespace 'pods-198', 0 are Running and Ready. +Aug 24 12:09:06.569: INFO: POD NODE PHASE GRACE CONDITIONS +Aug 24 12:09:06.569: INFO: test-pod-1 pe9deep4seen-3 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC ContainersNotReady containers with unready status: [token-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC ContainersNotReady containers with unready status: [token-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC }] +Aug 24 12:09:06.569: INFO: test-pod-2 pe9deep4seen-3 Pending [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC }] +Aug 24 12:09:06.570: INFO: test-pod-3 pe9deep4seen-3 Pending [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC }] +Aug 24 12:09:06.570: INFO: +Aug 24 12:09:08.588: INFO: 3 / 3 pods in namespace 'pods-198' are running and ready (2 seconds elapsed) +Aug 24 12:09:08.588: INFO: expected 0 pod replicas in namespace 'pods-198', 0 are Running and Ready. 
+STEP: waiting for all pods to be deleted 08/24/23 12:09:08.623 +Aug 24 12:09:08.631: INFO: Pod quantity 3 is different from expected quantity 0 +Aug 24 12:09:09.639: INFO: Pod quantity 3 is different from expected quantity 0 +Aug 24 12:09:10.641: INFO: Pod quantity 3 is different from expected quantity 0 +[AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 -Jul 29 16:09:29.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 12:09:11.639: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-810" for this suite. 07/29/23 16:09:29.498 +STEP: Destroying namespace "pods-198" for this suite. 08/24/23 12:09:11.647 ------------------------------ -• [2.219 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - binary data should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:175 +• [SLOW TEST] [5.220 seconds] +[sig-node] Pods +test/e2e/common/node/framework.go:23 + should delete a collection of pods [Conformance] + test/e2e/common/node/pods.go:845 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-node] Pods set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:09:27.296 - Jul 29 16:09:27.297: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:09:27.299 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:27.348 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:27.357 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 12:09:06.443 + Aug 24 12:09:06.443: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 12:09:06.445 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:09:06.476 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:09:06.482 + [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 - [It] binary data should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:175 - STEP: Creating configMap with name configmap-test-upd-a8fb1350-8980-4279-8b2e-b0d989e905b4 07/29/23 16:09:27.381 - STEP: Creating the pod 07/29/23 16:09:27.392 - Jul 29 16:09:27.409: INFO: Waiting up to 5m0s for pod "pod-configmaps-da98156c-ac9c-470e-a626-1f1320125ace" in namespace "configmap-810" to be "running" - Jul 29 16:09:27.418: INFO: Pod "pod-configmaps-da98156c-ac9c-470e-a626-1f1320125ace": Phase="Pending", Reason="", readiness=false. Elapsed: 8.076739ms - Jul 29 16:09:29.434: INFO: Pod "pod-configmaps-da98156c-ac9c-470e-a626-1f1320125ace": Phase="Running", Reason="", readiness=false. 
Elapsed: 2.024386151s - Jul 29 16:09:29.435: INFO: Pod "pod-configmaps-da98156c-ac9c-470e-a626-1f1320125ace" satisfied condition "running" - STEP: Waiting for pod with text data 07/29/23 16:09:29.435 - STEP: Waiting for pod with binary data 07/29/23 16:09:29.471 - [AfterEach] [sig-storage] ConfigMap + [BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 + [It] should delete a collection of pods [Conformance] + test/e2e/common/node/pods.go:845 + STEP: Create set of pods 08/24/23 12:09:06.487 + Aug 24 12:09:06.507: INFO: created test-pod-1 + Aug 24 12:09:06.520: INFO: created test-pod-2 + Aug 24 12:09:06.543: INFO: created test-pod-3 + STEP: waiting for all 3 pods to be running 08/24/23 12:09:06.543 + Aug 24 12:09:06.544: INFO: Waiting up to 5m0s for all pods (need at least 3) in namespace 'pods-198' to be running and ready + Aug 24 12:09:06.569: INFO: The status of Pod test-pod-1 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed + Aug 24 12:09:06.569: INFO: The status of Pod test-pod-2 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed + Aug 24 12:09:06.569: INFO: The status of Pod test-pod-3 is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed + Aug 24 12:09:06.569: INFO: 0 / 3 pods in namespace 'pods-198' are running and ready (0 seconds elapsed) + Aug 24 12:09:06.569: INFO: expected 0 pod replicas in namespace 'pods-198', 0 are Running and Ready. + Aug 24 12:09:06.569: INFO: POD NODE PHASE GRACE CONDITIONS + Aug 24 12:09:06.569: INFO: test-pod-1 pe9deep4seen-3 Pending [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC ContainersNotReady containers with unready status: [token-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC ContainersNotReady containers with unready status: [token-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC }] + Aug 24 12:09:06.569: INFO: test-pod-2 pe9deep4seen-3 Pending [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC }] + Aug 24 12:09:06.570: INFO: test-pod-3 pe9deep4seen-3 Pending [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:09:06 +0000 UTC }] + Aug 24 12:09:06.570: INFO: + Aug 24 12:09:08.588: INFO: 3 / 3 pods in namespace 'pods-198' are running and ready (2 seconds elapsed) + Aug 24 12:09:08.588: INFO: expected 0 pod replicas in namespace 'pods-198', 0 are Running and Ready. 
+ STEP: waiting for all pods to be deleted 08/24/23 12:09:08.623 + Aug 24 12:09:08.631: INFO: Pod quantity 3 is different from expected quantity 0 + Aug 24 12:09:09.639: INFO: Pod quantity 3 is different from expected quantity 0 + Aug 24 12:09:10.641: INFO: Pod quantity 3 is different from expected quantity 0 + [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 - Jul 29 16:09:29.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 12:09:11.639: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-810" for this suite. 07/29/23 16:09:29.498 + STEP: Destroying namespace "pods-198" for this suite. 08/24/23 12:09:11.647 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] Garbage collector - should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] - test/e2e/apimachinery/garbage_collector.go:650 + should orphan pods created by rc if delete options say so [Conformance] + test/e2e/apimachinery/garbage_collector.go:370 [BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:09:29.519 -Jul 29 16:09:29.519: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename gc 07/29/23 16:09:29.522 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:29.562 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:29.57 +STEP: Creating a kubernetes client 08/24/23 12:09:11.681 +Aug 24 12:09:11.681: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename gc 08/24/23 12:09:11.686 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:09:11.717 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:09:11.721 [BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 -[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] - test/e2e/apimachinery/garbage_collector.go:650 -STEP: create the rc 07/29/23 16:09:29.581 -STEP: delete the rc 07/29/23 16:09:34.657 -STEP: wait for the rc to be deleted 07/29/23 16:09:35.094 -Jul 29 16:09:36.945: INFO: 88 pods remaining -Jul 29 16:09:36.945: INFO: 80 pods has nil DeletionTimestamp -Jul 29 16:09:36.945: INFO: -Jul 29 16:09:37.916: INFO: 74 pods remaining -Jul 29 16:09:37.917: INFO: 62 pods has nil DeletionTimestamp -Jul 29 16:09:37.917: INFO: -Jul 29 16:09:38.342: INFO: 67 pods remaining -Jul 29 16:09:38.342: INFO: 50 pods has nil DeletionTimestamp -Jul 29 16:09:38.342: INFO: -Jul 29 16:09:40.381: INFO: 60 pods remaining -Jul 29 16:09:40.403: INFO: 37 pods has nil DeletionTimestamp -Jul 29 16:09:40.403: INFO: -Jul 29 16:09:41.444: INFO: 49 pods remaining -Jul 29 16:09:41.444: INFO: 11 pods has nil DeletionTimestamp -Jul 29 16:09:41.444: INFO: -Jul 29 16:09:42.480: INFO: 42 pods remaining -Jul 29 
16:09:42.502: INFO: 0 pods has nil DeletionTimestamp -Jul 29 16:09:42.502: INFO: -Jul 29 16:09:43.422: INFO: 39 pods remaining -Jul 29 16:09:43.422: INFO: 0 pods has nil DeletionTimestamp -Jul 29 16:09:43.422: INFO: -Jul 29 16:09:44.487: INFO: 30 pods remaining -Jul 29 16:09:44.489: INFO: 0 pods has nil DeletionTimestamp -Jul 29 16:09:44.489: INFO: -Jul 29 16:09:45.232: INFO: 24 pods remaining -Jul 29 16:09:45.232: INFO: 0 pods has nil DeletionTimestamp -Jul 29 16:09:45.233: INFO: -Jul 29 16:09:46.265: INFO: 17 pods remaining -Jul 29 16:09:46.265: INFO: 0 pods has nil DeletionTimestamp -Jul 29 16:09:46.265: INFO: -Jul 29 16:09:47.330: INFO: 10 pods remaining -Jul 29 16:09:47.331: INFO: 0 pods has nil DeletionTimestamp -Jul 29 16:09:47.334: INFO: -Jul 29 16:09:48.206: INFO: 5 pods remaining -Jul 29 16:09:48.206: INFO: 0 pods has nil DeletionTimestamp -Jul 29 16:09:48.207: INFO: -STEP: Gathering metrics 07/29/23 16:09:49.202 -Jul 29 16:09:49.462: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" -Jul 29 16:09:49.469: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. Elapsed: 6.992064ms -Jul 29 16:09:49.469: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) -Jul 29 16:09:49.470: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" -Jul 29 16:09:49.962: INFO: For apiserver_request_total: +[It] should orphan pods created by rc if delete options say so [Conformance] + test/e2e/apimachinery/garbage_collector.go:370 +STEP: create the rc 08/24/23 12:09:11.732 +STEP: delete the rc 08/24/23 12:09:16.995 +STEP: wait for the rc to be deleted 08/24/23 12:09:17.024 +STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods 08/24/23 12:09:22.297 +STEP: Gathering metrics 08/24/23 12:09:52.327 +Aug 24 12:09:52.359: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" +Aug 24 12:09:52.365: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 6.609153ms +Aug 24 12:09:52.365: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) +Aug 24 12:09:52.365: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" +Aug 24 12:09:52.472: INFO: For apiserver_request_total: For apiserver_request_latency_seconds: For apiserver_init_events_total: For garbage_collector_attempt_to_delete_queue_latency: @@ -11630,80 +11441,145 @@ For function_duration_seconds: For errors_total: For evicted_pods_total: +Aug 24 12:09:52.472: INFO: Deleting pod "simpletest.rc-2vc7k" in namespace "gc-4959" +Aug 24 12:09:52.528: INFO: Deleting pod "simpletest.rc-42m6h" in namespace "gc-4959" +Aug 24 12:09:52.568: INFO: Deleting pod "simpletest.rc-442wc" in namespace "gc-4959" +Aug 24 12:09:52.623: INFO: Deleting pod "simpletest.rc-44c2n" in namespace "gc-4959" +Aug 24 12:09:52.707: INFO: Deleting pod "simpletest.rc-46f8q" in namespace "gc-4959" +Aug 24 12:09:52.775: INFO: Deleting pod "simpletest.rc-46txg" in namespace "gc-4959" +Aug 24 12:09:52.863: INFO: Deleting pod "simpletest.rc-4bq76" in namespace "gc-4959" +Aug 24 12:09:52.937: INFO: Deleting pod "simpletest.rc-4qx47" in namespace "gc-4959" +Aug 24 12:09:53.011: INFO: Deleting pod "simpletest.rc-4zq54" in namespace "gc-4959" +Aug 24 12:09:53.050: INFO: Deleting pod "simpletest.rc-54tm6" in namespace "gc-4959" +Aug 24 12:09:53.129: INFO: Deleting pod "simpletest.rc-59gqq" in namespace "gc-4959" +Aug 24 12:09:53.282: INFO: Deleting pod "simpletest.rc-5l2t2" in namespace "gc-4959" +Aug 24 12:09:53.331: INFO: Deleting pod "simpletest.rc-5twlk" in namespace "gc-4959" +Aug 24 12:09:53.395: INFO: Deleting pod "simpletest.rc-659bs" in namespace "gc-4959" +Aug 24 12:09:53.475: INFO: Deleting pod "simpletest.rc-65vnp" in namespace "gc-4959" +Aug 24 12:09:53.566: INFO: Deleting pod "simpletest.rc-68hgd" in namespace "gc-4959" +Aug 24 12:09:53.709: INFO: Deleting pod "simpletest.rc-6hfd5" in namespace "gc-4959" +Aug 24 12:09:53.733: INFO: Deleting pod "simpletest.rc-6lzpm" in namespace "gc-4959" +Aug 24 12:09:53.753: INFO: Deleting pod "simpletest.rc-6mqg4" in namespace "gc-4959" +Aug 24 12:09:53.792: INFO: Deleting pod "simpletest.rc-6pdlg" in namespace "gc-4959" +Aug 24 12:09:53.877: INFO: Deleting pod "simpletest.rc-6rxx8" in namespace "gc-4959" +Aug 24 12:09:53.914: INFO: Deleting pod "simpletest.rc-6xfnl" in namespace "gc-4959" +Aug 24 12:09:53.958: INFO: Deleting pod "simpletest.rc-85pvf" in namespace "gc-4959" +Aug 24 12:09:54.019: INFO: Deleting pod "simpletest.rc-86ms7" in namespace "gc-4959" +Aug 24 12:09:54.082: INFO: Deleting pod "simpletest.rc-89ftv" in namespace "gc-4959" +Aug 24 12:09:54.198: INFO: Deleting pod "simpletest.rc-8rxvm" in namespace "gc-4959" +Aug 24 12:09:54.269: INFO: Deleting pod "simpletest.rc-8t9rv" in namespace "gc-4959" +Aug 24 12:09:54.323: INFO: Deleting pod "simpletest.rc-95sfp" in namespace "gc-4959" +Aug 24 12:09:54.371: INFO: Deleting pod "simpletest.rc-bsffz" in namespace "gc-4959" +Aug 24 12:09:54.449: INFO: Deleting pod "simpletest.rc-bzcsk" in namespace "gc-4959" +Aug 24 12:09:54.508: INFO: Deleting pod "simpletest.rc-c2zdx" in namespace "gc-4959" +Aug 24 12:09:54.558: INFO: Deleting pod "simpletest.rc-cn8mv" in namespace "gc-4959" +Aug 24 12:09:54.619: INFO: Deleting pod "simpletest.rc-cr922" in namespace "gc-4959" +Aug 24 12:09:54.730: INFO: Deleting pod "simpletest.rc-cxpck" in namespace "gc-4959" +Aug 24 12:09:54.845: INFO: Deleting pod "simpletest.rc-dj8p4" in namespace "gc-4959" +Aug 24 
12:09:54.905: INFO: Deleting pod "simpletest.rc-djjjk" in namespace "gc-4959" +Aug 24 12:09:54.941: INFO: Deleting pod "simpletest.rc-dms4d" in namespace "gc-4959" +Aug 24 12:09:55.158: INFO: Deleting pod "simpletest.rc-dprgw" in namespace "gc-4959" +Aug 24 12:09:55.236: INFO: Deleting pod "simpletest.rc-drzgp" in namespace "gc-4959" +Aug 24 12:09:55.290: INFO: Deleting pod "simpletest.rc-dx79b" in namespace "gc-4959" +Aug 24 12:09:55.401: INFO: Deleting pod "simpletest.rc-f2mb6" in namespace "gc-4959" +Aug 24 12:09:55.487: INFO: Deleting pod "simpletest.rc-f4xsl" in namespace "gc-4959" +Aug 24 12:09:55.628: INFO: Deleting pod "simpletest.rc-fl2b5" in namespace "gc-4959" +Aug 24 12:09:55.735: INFO: Deleting pod "simpletest.rc-fmdgw" in namespace "gc-4959" +Aug 24 12:09:55.798: INFO: Deleting pod "simpletest.rc-fqxrn" in namespace "gc-4959" +Aug 24 12:09:55.888: INFO: Deleting pod "simpletest.rc-g4dc9" in namespace "gc-4959" +Aug 24 12:09:55.950: INFO: Deleting pod "simpletest.rc-g68vt" in namespace "gc-4959" +Aug 24 12:09:56.058: INFO: Deleting pod "simpletest.rc-gjqbw" in namespace "gc-4959" +Aug 24 12:09:56.173: INFO: Deleting pod "simpletest.rc-gsrh8" in namespace "gc-4959" +Aug 24 12:09:56.227: INFO: Deleting pod "simpletest.rc-hh7kc" in namespace "gc-4959" +Aug 24 12:09:56.313: INFO: Deleting pod "simpletest.rc-hndcc" in namespace "gc-4959" +Aug 24 12:09:56.372: INFO: Deleting pod "simpletest.rc-hv28k" in namespace "gc-4959" +Aug 24 12:09:56.488: INFO: Deleting pod "simpletest.rc-jbxmn" in namespace "gc-4959" +Aug 24 12:09:56.591: INFO: Deleting pod "simpletest.rc-jx8b8" in namespace "gc-4959" +Aug 24 12:09:56.664: INFO: Deleting pod "simpletest.rc-kbp4k" in namespace "gc-4959" +Aug 24 12:09:56.776: INFO: Deleting pod "simpletest.rc-kf8v4" in namespace "gc-4959" +Aug 24 12:09:56.901: INFO: Deleting pod "simpletest.rc-kl9pm" in namespace "gc-4959" +Aug 24 12:09:56.997: INFO: Deleting pod "simpletest.rc-l8dwr" in namespace "gc-4959" +Aug 24 12:09:57.126: INFO: Deleting pod "simpletest.rc-lcs2s" in namespace "gc-4959" +Aug 24 12:09:57.182: INFO: Deleting pod "simpletest.rc-m26gv" in namespace "gc-4959" +Aug 24 12:09:57.222: INFO: Deleting pod "simpletest.rc-m9fqg" in namespace "gc-4959" +Aug 24 12:09:57.256: INFO: Deleting pod "simpletest.rc-m9xb4" in namespace "gc-4959" +Aug 24 12:09:57.301: INFO: Deleting pod "simpletest.rc-mb62s" in namespace "gc-4959" +Aug 24 12:09:57.363: INFO: Deleting pod "simpletest.rc-nc8lf" in namespace "gc-4959" +Aug 24 12:09:57.457: INFO: Deleting pod "simpletest.rc-nnfvm" in namespace "gc-4959" +Aug 24 12:09:57.574: INFO: Deleting pod "simpletest.rc-p2ftm" in namespace "gc-4959" +Aug 24 12:09:57.668: INFO: Deleting pod "simpletest.rc-pmtbf" in namespace "gc-4959" +Aug 24 12:09:57.732: INFO: Deleting pod "simpletest.rc-ppdzg" in namespace "gc-4959" +Aug 24 12:09:57.800: INFO: Deleting pod "simpletest.rc-ppznp" in namespace "gc-4959" +Aug 24 12:09:57.843: INFO: Deleting pod "simpletest.rc-ptsdj" in namespace "gc-4959" +Aug 24 12:09:57.878: INFO: Deleting pod "simpletest.rc-q2rzm" in namespace "gc-4959" +Aug 24 12:09:58.026: INFO: Deleting pod "simpletest.rc-q2vhf" in namespace "gc-4959" +Aug 24 12:09:58.366: INFO: Deleting pod "simpletest.rc-q82mt" in namespace "gc-4959" +Aug 24 12:09:58.388: INFO: Deleting pod "simpletest.rc-qdstk" in namespace "gc-4959" +Aug 24 12:09:58.435: INFO: Deleting pod "simpletest.rc-qkgsm" in namespace "gc-4959" +Aug 24 12:09:58.496: INFO: Deleting pod "simpletest.rc-s542t" in namespace "gc-4959" +Aug 24 12:09:58.548: INFO: Deleting 
pod "simpletest.rc-s6zv4" in namespace "gc-4959" +Aug 24 12:09:58.655: INFO: Deleting pod "simpletest.rc-s8zs7" in namespace "gc-4959" +Aug 24 12:09:58.737: INFO: Deleting pod "simpletest.rc-smksv" in namespace "gc-4959" +Aug 24 12:09:58.779: INFO: Deleting pod "simpletest.rc-t22z9" in namespace "gc-4959" +Aug 24 12:09:58.835: INFO: Deleting pod "simpletest.rc-t9624" in namespace "gc-4959" +Aug 24 12:09:58.887: INFO: Deleting pod "simpletest.rc-tjxw9" in namespace "gc-4959" +Aug 24 12:09:58.966: INFO: Deleting pod "simpletest.rc-trwkx" in namespace "gc-4959" +Aug 24 12:09:59.144: INFO: Deleting pod "simpletest.rc-txltk" in namespace "gc-4959" +Aug 24 12:09:59.236: INFO: Deleting pod "simpletest.rc-v2wkl" in namespace "gc-4959" +Aug 24 12:09:59.273: INFO: Deleting pod "simpletest.rc-v2zrz" in namespace "gc-4959" +Aug 24 12:09:59.375: INFO: Deleting pod "simpletest.rc-vfmd6" in namespace "gc-4959" +Aug 24 12:09:59.612: INFO: Deleting pod "simpletest.rc-vg6jj" in namespace "gc-4959" +Aug 24 12:09:59.755: INFO: Deleting pod "simpletest.rc-vj9lq" in namespace "gc-4959" +Aug 24 12:09:59.870: INFO: Deleting pod "simpletest.rc-vlwxf" in namespace "gc-4959" +Aug 24 12:09:59.938: INFO: Deleting pod "simpletest.rc-w6vb4" in namespace "gc-4959" +Aug 24 12:10:00.084: INFO: Deleting pod "simpletest.rc-w9mqg" in namespace "gc-4959" +Aug 24 12:10:00.197: INFO: Deleting pod "simpletest.rc-wh8hd" in namespace "gc-4959" +Aug 24 12:10:00.267: INFO: Deleting pod "simpletest.rc-wkpc8" in namespace "gc-4959" +Aug 24 12:10:00.343: INFO: Deleting pod "simpletest.rc-wkwww" in namespace "gc-4959" +Aug 24 12:10:00.451: INFO: Deleting pod "simpletest.rc-wxblj" in namespace "gc-4959" +Aug 24 12:10:00.511: INFO: Deleting pod "simpletest.rc-xhrwf" in namespace "gc-4959" +Aug 24 12:10:00.574: INFO: Deleting pod "simpletest.rc-xmjw8" in namespace "gc-4959" +Aug 24 12:10:00.672: INFO: Deleting pod "simpletest.rc-xnkt5" in namespace "gc-4959" +Aug 24 12:10:00.719: INFO: Deleting pod "simpletest.rc-xvlk4" in namespace "gc-4959" [AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 -Jul 29 16:09:49.963: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:10:00.797: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 -STEP: Destroying namespace "gc-1297" for this suite. 07/29/23 16:09:49.972 +STEP: Destroying namespace "gc-4959" for this suite. 
08/24/23 12:10:00.911 ------------------------------ -• [SLOW TEST] [20.469 seconds] +• [SLOW TEST] [49.303 seconds] [sig-api-machinery] Garbage collector test/e2e/apimachinery/framework.go:23 - should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] - test/e2e/apimachinery/garbage_collector.go:650 + should orphan pods created by rc if delete options say so [Conformance] + test/e2e/apimachinery/garbage_collector.go:370 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:09:29.519 - Jul 29 16:09:29.519: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename gc 07/29/23 16:09:29.522 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:29.562 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:29.57 + STEP: Creating a kubernetes client 08/24/23 12:09:11.681 + Aug 24 12:09:11.681: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename gc 08/24/23 12:09:11.686 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:09:11.717 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:09:11.721 [BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 - [It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] - test/e2e/apimachinery/garbage_collector.go:650 - STEP: create the rc 07/29/23 16:09:29.581 - STEP: delete the rc 07/29/23 16:09:34.657 - STEP: wait for the rc to be deleted 07/29/23 16:09:35.094 - Jul 29 16:09:36.945: INFO: 88 pods remaining - Jul 29 16:09:36.945: INFO: 80 pods has nil DeletionTimestamp - Jul 29 16:09:36.945: INFO: - Jul 29 16:09:37.916: INFO: 74 pods remaining - Jul 29 16:09:37.917: INFO: 62 pods has nil DeletionTimestamp - Jul 29 16:09:37.917: INFO: - Jul 29 16:09:38.342: INFO: 67 pods remaining - Jul 29 16:09:38.342: INFO: 50 pods has nil DeletionTimestamp - Jul 29 16:09:38.342: INFO: - Jul 29 16:09:40.381: INFO: 60 pods remaining - Jul 29 16:09:40.403: INFO: 37 pods has nil DeletionTimestamp - Jul 29 16:09:40.403: INFO: - Jul 29 16:09:41.444: INFO: 49 pods remaining - Jul 29 16:09:41.444: INFO: 11 pods has nil DeletionTimestamp - Jul 29 16:09:41.444: INFO: - Jul 29 16:09:42.480: INFO: 42 pods remaining - Jul 29 16:09:42.502: INFO: 0 pods has nil DeletionTimestamp - Jul 29 16:09:42.502: INFO: - Jul 29 16:09:43.422: INFO: 39 pods remaining - Jul 29 16:09:43.422: INFO: 0 pods has nil DeletionTimestamp - Jul 29 16:09:43.422: INFO: - Jul 29 16:09:44.487: INFO: 30 pods remaining - Jul 29 16:09:44.489: INFO: 0 pods has nil DeletionTimestamp - Jul 29 16:09:44.489: INFO: - Jul 29 16:09:45.232: INFO: 24 pods remaining - Jul 29 16:09:45.232: INFO: 0 pods has nil DeletionTimestamp - Jul 29 16:09:45.233: INFO: - Jul 29 16:09:46.265: INFO: 17 pods remaining - Jul 29 16:09:46.265: INFO: 0 pods has nil DeletionTimestamp - Jul 29 16:09:46.265: INFO: - Jul 29 16:09:47.330: INFO: 10 pods remaining - Jul 29 16:09:47.331: INFO: 0 pods has nil DeletionTimestamp - Jul 29 16:09:47.334: INFO: - Jul 29 16:09:48.206: INFO: 5 pods remaining - Jul 29 16:09:48.206: INFO: 0 pods has nil DeletionTimestamp - Jul 29 16:09:48.207: INFO: - STEP: Gathering metrics 07/29/23 16:09:49.202 - Jul 29 16:09:49.462: INFO: Waiting up to 5m0s for pod 
"kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" - Jul 29 16:09:49.469: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. Elapsed: 6.992064ms - Jul 29 16:09:49.469: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) - Jul 29 16:09:49.470: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" - Jul 29 16:09:49.962: INFO: For apiserver_request_total: + [It] should orphan pods created by rc if delete options say so [Conformance] + test/e2e/apimachinery/garbage_collector.go:370 + STEP: create the rc 08/24/23 12:09:11.732 + STEP: delete the rc 08/24/23 12:09:16.995 + STEP: wait for the rc to be deleted 08/24/23 12:09:17.024 + STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods 08/24/23 12:09:22.297 + STEP: Gathering metrics 08/24/23 12:09:52.327 + Aug 24 12:09:52.359: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" + Aug 24 12:09:52.365: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. Elapsed: 6.609153ms + Aug 24 12:09:52.365: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) + Aug 24 12:09:52.365: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" + Aug 24 12:09:52.472: INFO: For apiserver_request_total: For apiserver_request_latency_seconds: For apiserver_init_events_total: For garbage_collector_attempt_to_delete_queue_latency: @@ -11726,4678 +11602,3120 @@ test/e2e/apimachinery/framework.go:23 For errors_total: For evicted_pods_total: + Aug 24 12:09:52.472: INFO: Deleting pod "simpletest.rc-2vc7k" in namespace "gc-4959" + Aug 24 12:09:52.528: INFO: Deleting pod "simpletest.rc-42m6h" in namespace "gc-4959" + Aug 24 12:09:52.568: INFO: Deleting pod "simpletest.rc-442wc" in namespace "gc-4959" + Aug 24 12:09:52.623: INFO: Deleting pod "simpletest.rc-44c2n" in namespace "gc-4959" + Aug 24 12:09:52.707: INFO: Deleting pod "simpletest.rc-46f8q" in namespace "gc-4959" + Aug 24 12:09:52.775: INFO: Deleting pod "simpletest.rc-46txg" in namespace "gc-4959" + Aug 24 12:09:52.863: INFO: Deleting pod "simpletest.rc-4bq76" in namespace "gc-4959" + Aug 24 12:09:52.937: INFO: Deleting pod "simpletest.rc-4qx47" in namespace "gc-4959" + Aug 24 12:09:53.011: INFO: Deleting pod "simpletest.rc-4zq54" in namespace "gc-4959" + Aug 24 12:09:53.050: INFO: Deleting pod "simpletest.rc-54tm6" in namespace "gc-4959" + Aug 24 12:09:53.129: INFO: Deleting pod "simpletest.rc-59gqq" in namespace "gc-4959" + Aug 24 12:09:53.282: INFO: Deleting pod "simpletest.rc-5l2t2" in namespace "gc-4959" + Aug 24 12:09:53.331: INFO: Deleting pod "simpletest.rc-5twlk" in namespace "gc-4959" + Aug 24 12:09:53.395: INFO: Deleting pod "simpletest.rc-659bs" in namespace "gc-4959" + Aug 24 12:09:53.475: INFO: Deleting pod "simpletest.rc-65vnp" in namespace "gc-4959" + Aug 24 12:09:53.566: INFO: Deleting pod "simpletest.rc-68hgd" in namespace "gc-4959" + Aug 24 12:09:53.709: INFO: Deleting pod "simpletest.rc-6hfd5" in namespace "gc-4959" + Aug 24 12:09:53.733: INFO: Deleting pod "simpletest.rc-6lzpm" in namespace "gc-4959" + Aug 24 12:09:53.753: INFO: Deleting pod "simpletest.rc-6mqg4" in namespace "gc-4959" + Aug 24 12:09:53.792: INFO: Deleting pod "simpletest.rc-6pdlg" in namespace "gc-4959" + Aug 24 12:09:53.877: INFO: Deleting pod 
"simpletest.rc-6rxx8" in namespace "gc-4959" + Aug 24 12:09:53.914: INFO: Deleting pod "simpletest.rc-6xfnl" in namespace "gc-4959" + Aug 24 12:09:53.958: INFO: Deleting pod "simpletest.rc-85pvf" in namespace "gc-4959" + Aug 24 12:09:54.019: INFO: Deleting pod "simpletest.rc-86ms7" in namespace "gc-4959" + Aug 24 12:09:54.082: INFO: Deleting pod "simpletest.rc-89ftv" in namespace "gc-4959" + Aug 24 12:09:54.198: INFO: Deleting pod "simpletest.rc-8rxvm" in namespace "gc-4959" + Aug 24 12:09:54.269: INFO: Deleting pod "simpletest.rc-8t9rv" in namespace "gc-4959" + Aug 24 12:09:54.323: INFO: Deleting pod "simpletest.rc-95sfp" in namespace "gc-4959" + Aug 24 12:09:54.371: INFO: Deleting pod "simpletest.rc-bsffz" in namespace "gc-4959" + Aug 24 12:09:54.449: INFO: Deleting pod "simpletest.rc-bzcsk" in namespace "gc-4959" + Aug 24 12:09:54.508: INFO: Deleting pod "simpletest.rc-c2zdx" in namespace "gc-4959" + Aug 24 12:09:54.558: INFO: Deleting pod "simpletest.rc-cn8mv" in namespace "gc-4959" + Aug 24 12:09:54.619: INFO: Deleting pod "simpletest.rc-cr922" in namespace "gc-4959" + Aug 24 12:09:54.730: INFO: Deleting pod "simpletest.rc-cxpck" in namespace "gc-4959" + Aug 24 12:09:54.845: INFO: Deleting pod "simpletest.rc-dj8p4" in namespace "gc-4959" + Aug 24 12:09:54.905: INFO: Deleting pod "simpletest.rc-djjjk" in namespace "gc-4959" + Aug 24 12:09:54.941: INFO: Deleting pod "simpletest.rc-dms4d" in namespace "gc-4959" + Aug 24 12:09:55.158: INFO: Deleting pod "simpletest.rc-dprgw" in namespace "gc-4959" + Aug 24 12:09:55.236: INFO: Deleting pod "simpletest.rc-drzgp" in namespace "gc-4959" + Aug 24 12:09:55.290: INFO: Deleting pod "simpletest.rc-dx79b" in namespace "gc-4959" + Aug 24 12:09:55.401: INFO: Deleting pod "simpletest.rc-f2mb6" in namespace "gc-4959" + Aug 24 12:09:55.487: INFO: Deleting pod "simpletest.rc-f4xsl" in namespace "gc-4959" + Aug 24 12:09:55.628: INFO: Deleting pod "simpletest.rc-fl2b5" in namespace "gc-4959" + Aug 24 12:09:55.735: INFO: Deleting pod "simpletest.rc-fmdgw" in namespace "gc-4959" + Aug 24 12:09:55.798: INFO: Deleting pod "simpletest.rc-fqxrn" in namespace "gc-4959" + Aug 24 12:09:55.888: INFO: Deleting pod "simpletest.rc-g4dc9" in namespace "gc-4959" + Aug 24 12:09:55.950: INFO: Deleting pod "simpletest.rc-g68vt" in namespace "gc-4959" + Aug 24 12:09:56.058: INFO: Deleting pod "simpletest.rc-gjqbw" in namespace "gc-4959" + Aug 24 12:09:56.173: INFO: Deleting pod "simpletest.rc-gsrh8" in namespace "gc-4959" + Aug 24 12:09:56.227: INFO: Deleting pod "simpletest.rc-hh7kc" in namespace "gc-4959" + Aug 24 12:09:56.313: INFO: Deleting pod "simpletest.rc-hndcc" in namespace "gc-4959" + Aug 24 12:09:56.372: INFO: Deleting pod "simpletest.rc-hv28k" in namespace "gc-4959" + Aug 24 12:09:56.488: INFO: Deleting pod "simpletest.rc-jbxmn" in namespace "gc-4959" + Aug 24 12:09:56.591: INFO: Deleting pod "simpletest.rc-jx8b8" in namespace "gc-4959" + Aug 24 12:09:56.664: INFO: Deleting pod "simpletest.rc-kbp4k" in namespace "gc-4959" + Aug 24 12:09:56.776: INFO: Deleting pod "simpletest.rc-kf8v4" in namespace "gc-4959" + Aug 24 12:09:56.901: INFO: Deleting pod "simpletest.rc-kl9pm" in namespace "gc-4959" + Aug 24 12:09:56.997: INFO: Deleting pod "simpletest.rc-l8dwr" in namespace "gc-4959" + Aug 24 12:09:57.126: INFO: Deleting pod "simpletest.rc-lcs2s" in namespace "gc-4959" + Aug 24 12:09:57.182: INFO: Deleting pod "simpletest.rc-m26gv" in namespace "gc-4959" + Aug 24 12:09:57.222: INFO: Deleting pod "simpletest.rc-m9fqg" in namespace "gc-4959" + Aug 24 12:09:57.256: INFO: 
Deleting pod "simpletest.rc-m9xb4" in namespace "gc-4959" + Aug 24 12:09:57.301: INFO: Deleting pod "simpletest.rc-mb62s" in namespace "gc-4959" + Aug 24 12:09:57.363: INFO: Deleting pod "simpletest.rc-nc8lf" in namespace "gc-4959" + Aug 24 12:09:57.457: INFO: Deleting pod "simpletest.rc-nnfvm" in namespace "gc-4959" + Aug 24 12:09:57.574: INFO: Deleting pod "simpletest.rc-p2ftm" in namespace "gc-4959" + Aug 24 12:09:57.668: INFO: Deleting pod "simpletest.rc-pmtbf" in namespace "gc-4959" + Aug 24 12:09:57.732: INFO: Deleting pod "simpletest.rc-ppdzg" in namespace "gc-4959" + Aug 24 12:09:57.800: INFO: Deleting pod "simpletest.rc-ppznp" in namespace "gc-4959" + Aug 24 12:09:57.843: INFO: Deleting pod "simpletest.rc-ptsdj" in namespace "gc-4959" + Aug 24 12:09:57.878: INFO: Deleting pod "simpletest.rc-q2rzm" in namespace "gc-4959" + Aug 24 12:09:58.026: INFO: Deleting pod "simpletest.rc-q2vhf" in namespace "gc-4959" + Aug 24 12:09:58.366: INFO: Deleting pod "simpletest.rc-q82mt" in namespace "gc-4959" + Aug 24 12:09:58.388: INFO: Deleting pod "simpletest.rc-qdstk" in namespace "gc-4959" + Aug 24 12:09:58.435: INFO: Deleting pod "simpletest.rc-qkgsm" in namespace "gc-4959" + Aug 24 12:09:58.496: INFO: Deleting pod "simpletest.rc-s542t" in namespace "gc-4959" + Aug 24 12:09:58.548: INFO: Deleting pod "simpletest.rc-s6zv4" in namespace "gc-4959" + Aug 24 12:09:58.655: INFO: Deleting pod "simpletest.rc-s8zs7" in namespace "gc-4959" + Aug 24 12:09:58.737: INFO: Deleting pod "simpletest.rc-smksv" in namespace "gc-4959" + Aug 24 12:09:58.779: INFO: Deleting pod "simpletest.rc-t22z9" in namespace "gc-4959" + Aug 24 12:09:58.835: INFO: Deleting pod "simpletest.rc-t9624" in namespace "gc-4959" + Aug 24 12:09:58.887: INFO: Deleting pod "simpletest.rc-tjxw9" in namespace "gc-4959" + Aug 24 12:09:58.966: INFO: Deleting pod "simpletest.rc-trwkx" in namespace "gc-4959" + Aug 24 12:09:59.144: INFO: Deleting pod "simpletest.rc-txltk" in namespace "gc-4959" + Aug 24 12:09:59.236: INFO: Deleting pod "simpletest.rc-v2wkl" in namespace "gc-4959" + Aug 24 12:09:59.273: INFO: Deleting pod "simpletest.rc-v2zrz" in namespace "gc-4959" + Aug 24 12:09:59.375: INFO: Deleting pod "simpletest.rc-vfmd6" in namespace "gc-4959" + Aug 24 12:09:59.612: INFO: Deleting pod "simpletest.rc-vg6jj" in namespace "gc-4959" + Aug 24 12:09:59.755: INFO: Deleting pod "simpletest.rc-vj9lq" in namespace "gc-4959" + Aug 24 12:09:59.870: INFO: Deleting pod "simpletest.rc-vlwxf" in namespace "gc-4959" + Aug 24 12:09:59.938: INFO: Deleting pod "simpletest.rc-w6vb4" in namespace "gc-4959" + Aug 24 12:10:00.084: INFO: Deleting pod "simpletest.rc-w9mqg" in namespace "gc-4959" + Aug 24 12:10:00.197: INFO: Deleting pod "simpletest.rc-wh8hd" in namespace "gc-4959" + Aug 24 12:10:00.267: INFO: Deleting pod "simpletest.rc-wkpc8" in namespace "gc-4959" + Aug 24 12:10:00.343: INFO: Deleting pod "simpletest.rc-wkwww" in namespace "gc-4959" + Aug 24 12:10:00.451: INFO: Deleting pod "simpletest.rc-wxblj" in namespace "gc-4959" + Aug 24 12:10:00.511: INFO: Deleting pod "simpletest.rc-xhrwf" in namespace "gc-4959" + Aug 24 12:10:00.574: INFO: Deleting pod "simpletest.rc-xmjw8" in namespace "gc-4959" + Aug 24 12:10:00.672: INFO: Deleting pod "simpletest.rc-xnkt5" in namespace "gc-4959" + Aug 24 12:10:00.719: INFO: Deleting pod "simpletest.rc-xvlk4" in namespace "gc-4959" [AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 - Jul 29 16:09:49.963: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 
12:10:00.797: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 - STEP: Destroying namespace "gc-1297" for this suite. 07/29/23 16:09:49.972 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-storage] Downward API volume - should update labels on modification [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:130 -[BeforeEach] [sig-storage] Downward API volume - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:09:49.991 -Jul 29 16:09:49.991: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:09:50.035 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:50.528 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:50.535 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 -[It] should update labels on modification [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:130 -STEP: Creating the pod 07/29/23 16:09:50.545 -Jul 29 16:09:50.929: INFO: Waiting up to 5m0s for pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a" in namespace "downward-api-3226" to be "running and ready" -Jul 29 16:09:50.978: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Pending", Reason="", readiness=false. Elapsed: 48.58121ms -Jul 29 16:09:50.979: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:09:52.998: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.068096058s -Jul 29 16:09:52.998: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:09:54.995: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.065370349s -Jul 29 16:09:54.995: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:09:56.985: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.055237s -Jul 29 16:09:56.985: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:09:58.989: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Running", Reason="", readiness=true. 
Elapsed: 8.059605358s -Jul 29 16:09:58.990: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Running (Ready = true) -Jul 29 16:09:58.990: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a" satisfied condition "running and ready" -Jul 29 16:09:59.558: INFO: Successfully updated pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a" -[AfterEach] [sig-storage] Downward API volume - test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:01.607: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume - tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-3226" for this suite. 07/29/23 16:10:01.619 ------------------------------- -• [SLOW TEST] [11.639 seconds] -[sig-storage] Downward API volume -test/e2e/common/storage/framework.go:23 - should update labels on modification [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:130 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:09:49.991 - Jul 29 16:09:49.991: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:09:50.035 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:09:50.528 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:09:50.535 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 - [It] should update labels on modification [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:130 - STEP: Creating the pod 07/29/23 16:09:50.545 - Jul 29 16:09:50.929: INFO: Waiting up to 5m0s for pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a" in namespace "downward-api-3226" to be "running and ready" - Jul 29 16:09:50.978: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Pending", Reason="", readiness=false. Elapsed: 48.58121ms - Jul 29 16:09:50.979: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:09:52.998: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.068096058s - Jul 29 16:09:52.998: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:09:54.995: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.065370349s - Jul 29 16:09:54.995: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:09:56.985: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Pending", Reason="", readiness=false. 
Elapsed: 6.055237s - Jul 29 16:09:56.985: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:09:58.989: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a": Phase="Running", Reason="", readiness=true. Elapsed: 8.059605358s - Jul 29 16:09:58.990: INFO: The phase of Pod labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a is Running (Ready = true) - Jul 29 16:09:58.990: INFO: Pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a" satisfied condition "running and ready" - Jul 29 16:09:59.558: INFO: Successfully updated pod "labelsupdate1692d4ca-5a30-43fc-a5af-9ebe01a1027a" - [AfterEach] [sig-storage] Downward API volume - test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:01.607: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume - tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-3226" for this suite. 07/29/23 16:10:01.619 + STEP: Destroying namespace "gc-4959" for this suite. 08/24/23 12:10:00.911 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] - test/e2e/apimachinery/webhook.go:277 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition + listing custom resource definition objects works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:85 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:10:01.635 -Jul 29 16:10:01.636: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 16:10:01.638 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:01.706 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:01.711 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:10:00.986 +Aug 24 12:10:00.986: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 12:10:00.991 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:01.193 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:01.203 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 16:10:01.783 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:10:02.209 -STEP: Deploying the webhook pod 07/29/23 16:10:02.223 -STEP: Wait for the deployment to be ready 07/29/23 16:10:02.25 -Jul 29 16:10:02.263: INFO: new 
replicaset for deployment "sample-webhook-deployment" is yet to be created -STEP: Deploying the webhook service 07/29/23 16:10:04.351 -STEP: Verifying the service has paired with the endpoint 07/29/23 16:10:04.377 -Jul 29 16:10:05.378: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] - test/e2e/apimachinery/webhook.go:277 -STEP: Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API 07/29/23 16:10:05.39 -STEP: Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API 07/29/23 16:10:05.434 -STEP: Creating a dummy validating-webhook-configuration object 07/29/23 16:10:05.467 -STEP: Deleting the validating-webhook-configuration, which should be possible to remove 07/29/23 16:10:05.512 -STEP: Creating a dummy mutating-webhook-configuration object 07/29/23 16:10:05.527 -STEP: Deleting the mutating-webhook-configuration, which should be possible to remove 07/29/23 16:10:05.591 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[It] listing custom resource definition objects works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:85 +Aug 24 12:10:01.213: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:05.736: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 12:10:07.733: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-3646" for this suite. 07/29/23 16:10:05.918 -STEP: Destroying namespace "webhook-3646-markers" for this suite. 07/29/23 16:10:05.961 +STEP: Destroying namespace "custom-resource-definition-1007" for this suite. 
08/24/23 12:10:07.752 ------------------------------ -• [4.353 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +• [SLOW TEST] [6.782 seconds] +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/apimachinery/framework.go:23 - should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] - test/e2e/apimachinery/webhook.go:277 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:10:01.635 - Jul 29 16:10:01.636: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 16:10:01.638 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:01.706 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:01.711 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 16:10:01.783 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:10:02.209 - STEP: Deploying the webhook pod 07/29/23 16:10:02.223 - STEP: Wait for the deployment to be ready 07/29/23 16:10:02.25 - Jul 29 16:10:02.263: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created - STEP: Deploying the webhook service 07/29/23 16:10:04.351 - STEP: Verifying the service has paired with the endpoint 07/29/23 16:10:04.377 - Jul 29 16:10:05.378: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] - test/e2e/apimachinery/webhook.go:277 - STEP: Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API 07/29/23 16:10:05.39 - STEP: Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API 07/29/23 16:10:05.434 - STEP: Creating a dummy validating-webhook-configuration object 07/29/23 16:10:05.467 - STEP: Deleting the validating-webhook-configuration, which should be possible to remove 07/29/23 16:10:05.512 - STEP: Creating a dummy mutating-webhook-configuration object 07/29/23 16:10:05.527 - STEP: Deleting the mutating-webhook-configuration, which should be possible to remove 07/29/23 16:10:05.591 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:05.736: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-3646" for this suite. 
07/29/23 16:10:05.918 - STEP: Destroying namespace "webhook-3646-markers" for this suite. 07/29/23 16:10:05.961 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSS ------------------------------- -[sig-node] Security Context when creating containers with AllowPrivilegeEscalation - should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:609 -[BeforeEach] [sig-node] Security Context - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:10:05.992 -Jul 29 16:10:05.992: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename security-context-test 07/29/23 16:10:06.004 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:06.098 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:06.104 -[BeforeEach] [sig-node] Security Context - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Security Context - test/e2e/common/node/security_context.go:50 -[It] should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:609 -Jul 29 16:10:06.145: INFO: Waiting up to 5m0s for pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a" in namespace "security-context-test-6717" to be "Succeeded or Failed" -Jul 29 16:10:06.154: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a": Phase="Pending", Reason="", readiness=false. Elapsed: 8.889951ms -Jul 29 16:10:08.162: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017528609s -Jul 29 16:10:10.163: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018186876s -Jul 29 16:10:12.161: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.016603074s -Jul 29 16:10:12.161: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a" satisfied condition "Succeeded or Failed" -[AfterEach] [sig-node] Security Context - test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:12.178: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Security Context - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Security Context - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Security Context - tear down framework | framework.go:193 -STEP: Destroying namespace "security-context-test-6717" for this suite. 
07/29/23 16:10:12.187 ------------------------------- -• [SLOW TEST] [6.205 seconds] -[sig-node] Security Context -test/e2e/common/node/framework.go:23 - when creating containers with AllowPrivilegeEscalation - test/e2e/common/node/security_context.go:555 - should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:609 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Security Context - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:10:05.992 - Jul 29 16:10:05.992: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename security-context-test 07/29/23 16:10:06.004 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:06.098 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:06.104 - [BeforeEach] [sig-node] Security Context - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Security Context - test/e2e/common/node/security_context.go:50 - [It] should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:609 - Jul 29 16:10:06.145: INFO: Waiting up to 5m0s for pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a" in namespace "security-context-test-6717" to be "Succeeded or Failed" - Jul 29 16:10:06.154: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a": Phase="Pending", Reason="", readiness=false. Elapsed: 8.889951ms - Jul 29 16:10:08.162: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017528609s - Jul 29 16:10:10.163: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018186876s - Jul 29 16:10:12.161: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.016603074s - Jul 29 16:10:12.161: INFO: Pod "alpine-nnp-false-8cf451ba-2338-4ace-99c6-edac72a2db1a" satisfied condition "Succeeded or Failed" - [AfterEach] [sig-node] Security Context - test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:12.178: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Security Context - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Security Context - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Security Context - tear down framework | framework.go:193 - STEP: Destroying namespace "security-context-test-6717" for this suite. 
07/29/23 16:10:12.187 - << End Captured GinkgoWriter Output ------------------------------- -SSSS ------------------------------- -[sig-storage] Projected secret - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:56 -[BeforeEach] [sig-storage] Projected secret - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:10:12.198 -Jul 29 16:10:12.198: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:10:12.201 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:12.3 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:12.306 -[BeforeEach] [sig-storage] Projected secret - test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:56 -STEP: Creating projection with secret that has name projected-secret-test-f87c931e-4190-494b-b1f6-27e922a283dc 07/29/23 16:10:12.31 -STEP: Creating a pod to test consume secrets 07/29/23 16:10:12.321 -Jul 29 16:10:12.346: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a" in namespace "projected-9634" to be "Succeeded or Failed" -Jul 29 16:10:12.353: INFO: Pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.57747ms -Jul 29 16:10:14.362: INFO: Pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015424565s -Jul 29 16:10:16.364: INFO: Pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01739701s -STEP: Saw pod success 07/29/23 16:10:16.364 -Jul 29 16:10:16.365: INFO: Pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a" satisfied condition "Succeeded or Failed" -Jul 29 16:10:16.371: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a container projected-secret-volume-test: -STEP: delete the pod 07/29/23 16:10:16.385 -Jul 29 16:10:16.412: INFO: Waiting for pod pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a to disappear -Jul 29 16:10:16.417: INFO: Pod pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a no longer exists -[AfterEach] [sig-storage] Projected secret - test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:16.418: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected secret - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected secret - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected secret - tear down framework | framework.go:193 -STEP: Destroying namespace "projected-9634" for this suite. 
07/29/23 16:10:16.432 ------------------------------- -• [4.250 seconds] -[sig-storage] Projected secret -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:56 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected secret - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:10:12.198 - Jul 29 16:10:12.198: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:10:12.201 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:12.3 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:12.306 - [BeforeEach] [sig-storage] Projected secret - test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:56 - STEP: Creating projection with secret that has name projected-secret-test-f87c931e-4190-494b-b1f6-27e922a283dc 07/29/23 16:10:12.31 - STEP: Creating a pod to test consume secrets 07/29/23 16:10:12.321 - Jul 29 16:10:12.346: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a" in namespace "projected-9634" to be "Succeeded or Failed" - Jul 29 16:10:12.353: INFO: Pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.57747ms - Jul 29 16:10:14.362: INFO: Pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015424565s - Jul 29 16:10:16.364: INFO: Pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01739701s - STEP: Saw pod success 07/29/23 16:10:16.364 - Jul 29 16:10:16.365: INFO: Pod "pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a" satisfied condition "Succeeded or Failed" - Jul 29 16:10:16.371: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a container projected-secret-volume-test: - STEP: delete the pod 07/29/23 16:10:16.385 - Jul 29 16:10:16.412: INFO: Waiting for pod pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a to disappear - Jul 29 16:10:16.417: INFO: Pod pod-projected-secrets-d4e50244-b93b-4f79-a377-7c3f4430320a no longer exists - [AfterEach] [sig-storage] Projected secret - test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:16.418: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected secret - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected secret - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected secret - tear down framework | framework.go:193 - STEP: Destroying namespace "projected-9634" for this suite. 
07/29/23 16:10:16.432 - << End Captured GinkgoWriter Output ------------------------------- -[sig-network] Services - should complete a service status lifecycle [Conformance] - test/e2e/network/service.go:3428 -[BeforeEach] [sig-network] Services - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:10:16.449 -Jul 29 16:10:16.449: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 16:10:16.452 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:16.493 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:16.504 -[BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should complete a service status lifecycle [Conformance] - test/e2e/network/service.go:3428 -STEP: creating a Service 07/29/23 16:10:16.518 -STEP: watching for the Service to be added 07/29/23 16:10:16.543 -Jul 29 16:10:16.548: INFO: Found Service test-service-lhfgp in namespace services-4454 with labels: map[test-service-static:true] & ports [{http TCP 80 {0 80 } 0}] -Jul 29 16:10:16.548: INFO: Service test-service-lhfgp created -STEP: Getting /status 07/29/23 16:10:16.548 -Jul 29 16:10:16.558: INFO: Service test-service-lhfgp has LoadBalancer: {[]} -STEP: patching the ServiceStatus 07/29/23 16:10:16.558 -STEP: watching for the Service to be patched 07/29/23 16:10:16.574 -Jul 29 16:10:16.577: INFO: observed Service test-service-lhfgp in namespace services-4454 with annotations: map[] & LoadBalancer: {[]} -Jul 29 16:10:16.578: INFO: Found Service test-service-lhfgp in namespace services-4454 with annotations: map[patchedstatus:true] & LoadBalancer: {[{203.0.113.1 []}]} -Jul 29 16:10:16.578: INFO: Service test-service-lhfgp has service status patched -STEP: updating the ServiceStatus 07/29/23 16:10:16.578 -Jul 29 16:10:16.597: INFO: updatedStatus.Conditions: []v1.Condition{v1.Condition{Type:"StatusUpdate", Status:"True", ObservedGeneration:0, LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} -STEP: watching for the Service to be updated 07/29/23 16:10:16.597 -Jul 29 16:10:16.602: INFO: Observed Service test-service-lhfgp in namespace services-4454 with annotations: map[] & Conditions: {[]} -Jul 29 16:10:16.602: INFO: Observed event: &Service{ObjectMeta:{test-service-lhfgp services-4454 26a0ebe7-9331-4bcd-a258-15de46e845d7 17236 0 2023-07-29 16:10:16 +0000 UTC map[test-service-static:true] map[patchedstatus:true] [] [] [{e2e.test Update v1 2023-07-29 16:10:16 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:test-service-static":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:sessionAffinity":{},"f:type":{}}} } {e2e.test Update v1 2023-07-29 16:10:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:patchedstatus":{}}},"f:status":{"f:loadBalancer":{"f:ingress":{}}}} status}]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 80 
},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{},ClusterIP:10.233.52.179,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.233.52.179],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{LoadBalancerIngress{IP:203.0.113.1,Hostname:,Ports:[]PortStatus{},},},},Conditions:[]Condition{},},} -Jul 29 16:10:16.602: INFO: Found Service test-service-lhfgp in namespace services-4454 with annotations: map[patchedstatus:true] & Conditions: [{StatusUpdate True 0 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] -Jul 29 16:10:16.602: INFO: Service test-service-lhfgp has service status updated -STEP: patching the service 07/29/23 16:10:16.602 -STEP: watching for the Service to be patched 07/29/23 16:10:16.623 -Jul 29 16:10:16.630: INFO: observed Service test-service-lhfgp in namespace services-4454 with labels: map[test-service-static:true] -Jul 29 16:10:16.630: INFO: observed Service test-service-lhfgp in namespace services-4454 with labels: map[test-service-static:true] -Jul 29 16:10:16.630: INFO: observed Service test-service-lhfgp in namespace services-4454 with labels: map[test-service-static:true] -Jul 29 16:10:16.631: INFO: Found Service test-service-lhfgp in namespace services-4454 with labels: map[test-service:patched test-service-static:true] -Jul 29 16:10:16.631: INFO: Service test-service-lhfgp patched -STEP: deleting the service 07/29/23 16:10:16.631 -STEP: watching for the Service to be deleted 07/29/23 16:10:16.662 -Jul 29 16:10:16.666: INFO: Observed event: ADDED -Jul 29 16:10:16.667: INFO: Observed event: MODIFIED -Jul 29 16:10:16.667: INFO: Observed event: MODIFIED -Jul 29 16:10:16.667: INFO: Observed event: MODIFIED -Jul 29 16:10:16.667: INFO: Found Service test-service-lhfgp in namespace services-4454 with labels: map[test-service:patched test-service-static:true] & annotations: map[patchedstatus:true] -Jul 29 16:10:16.667: INFO: Service test-service-lhfgp deleted -[AfterEach] [sig-network] Services - test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:16.668: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services - tear down framework | framework.go:193 -STEP: Destroying namespace "services-4454" for this suite. 
07/29/23 16:10:16.678 ------------------------------- -• [0.245 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should complete a service status lifecycle [Conformance] - test/e2e/network/service.go:3428 + Simple CustomResourceDefinition + test/e2e/apimachinery/custom_resource_definition.go:50 + listing custom resource definition objects works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:85 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:10:16.449 - Jul 29 16:10:16.449: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 16:10:16.452 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:16.493 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:16.504 - [BeforeEach] [sig-network] Services - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should complete a service status lifecycle [Conformance] - test/e2e/network/service.go:3428 - STEP: creating a Service 07/29/23 16:10:16.518 - STEP: watching for the Service to be added 07/29/23 16:10:16.543 - Jul 29 16:10:16.548: INFO: Found Service test-service-lhfgp in namespace services-4454 with labels: map[test-service-static:true] & ports [{http TCP 80 {0 80 } 0}] - Jul 29 16:10:16.548: INFO: Service test-service-lhfgp created - STEP: Getting /status 07/29/23 16:10:16.548 - Jul 29 16:10:16.558: INFO: Service test-service-lhfgp has LoadBalancer: {[]} - STEP: patching the ServiceStatus 07/29/23 16:10:16.558 - STEP: watching for the Service to be patched 07/29/23 16:10:16.574 - Jul 29 16:10:16.577: INFO: observed Service test-service-lhfgp in namespace services-4454 with annotations: map[] & LoadBalancer: {[]} - Jul 29 16:10:16.578: INFO: Found Service test-service-lhfgp in namespace services-4454 with annotations: map[patchedstatus:true] & LoadBalancer: {[{203.0.113.1 []}]} - Jul 29 16:10:16.578: INFO: Service test-service-lhfgp has service status patched - STEP: updating the ServiceStatus 07/29/23 16:10:16.578 - Jul 29 16:10:16.597: INFO: updatedStatus.Conditions: []v1.Condition{v1.Condition{Type:"StatusUpdate", Status:"True", ObservedGeneration:0, LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} - STEP: watching for the Service to be updated 07/29/23 16:10:16.597 - Jul 29 16:10:16.602: INFO: Observed Service test-service-lhfgp in namespace services-4454 with annotations: map[] & Conditions: {[]} - Jul 29 16:10:16.602: INFO: Observed event: &Service{ObjectMeta:{test-service-lhfgp services-4454 26a0ebe7-9331-4bcd-a258-15de46e845d7 17236 0 2023-07-29 16:10:16 +0000 UTC map[test-service-static:true] map[patchedstatus:true] [] [] [{e2e.test Update v1 2023-07-29 16:10:16 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:test-service-static":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:sessionAffinity":{},"f:type":{}}} } {e2e.test Update v1 2023-07-29 16:10:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:patchedstatus":{}}},"f:status":{"f:loadBalancer":{"f:ingress":{}}}} status}]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 80 
},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{},ClusterIP:10.233.52.179,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.233.52.179],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{LoadBalancerIngress{IP:203.0.113.1,Hostname:,Ports:[]PortStatus{},},},},Conditions:[]Condition{},},} - Jul 29 16:10:16.602: INFO: Found Service test-service-lhfgp in namespace services-4454 with annotations: map[patchedstatus:true] & Conditions: [{StatusUpdate True 0 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] - Jul 29 16:10:16.602: INFO: Service test-service-lhfgp has service status updated - STEP: patching the service 07/29/23 16:10:16.602 - STEP: watching for the Service to be patched 07/29/23 16:10:16.623 - Jul 29 16:10:16.630: INFO: observed Service test-service-lhfgp in namespace services-4454 with labels: map[test-service-static:true] - Jul 29 16:10:16.630: INFO: observed Service test-service-lhfgp in namespace services-4454 with labels: map[test-service-static:true] - Jul 29 16:10:16.630: INFO: observed Service test-service-lhfgp in namespace services-4454 with labels: map[test-service-static:true] - Jul 29 16:10:16.631: INFO: Found Service test-service-lhfgp in namespace services-4454 with labels: map[test-service:patched test-service-static:true] - Jul 29 16:10:16.631: INFO: Service test-service-lhfgp patched - STEP: deleting the service 07/29/23 16:10:16.631 - STEP: watching for the Service to be deleted 07/29/23 16:10:16.662 - Jul 29 16:10:16.666: INFO: Observed event: ADDED - Jul 29 16:10:16.667: INFO: Observed event: MODIFIED - Jul 29 16:10:16.667: INFO: Observed event: MODIFIED - Jul 29 16:10:16.667: INFO: Observed event: MODIFIED - Jul 29 16:10:16.667: INFO: Found Service test-service-lhfgp in namespace services-4454 with labels: map[test-service:patched test-service-static:true] & annotations: map[patchedstatus:true] - Jul 29 16:10:16.667: INFO: Service test-service-lhfgp deleted - [AfterEach] [sig-network] Services + [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:10:00.986 + Aug 24 12:10:00.986: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 12:10:00.991 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:01.193 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:01.203 + [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:31 + [It] listing custom resource definition objects works [Conformance] + test/e2e/apimachinery/custom_resource_definition.go:85 + Aug 24 12:10:01.213: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:16.668: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 12:10:07.733: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready 
+ [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "services-4454" for this suite. 07/29/23 16:10:16.678 + STEP: Destroying namespace "custom-resource-definition-1007" for this suite. 08/24/23 12:10:07.752 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Downward API - should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:166 -[BeforeEach] [sig-node] Downward API +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should be able to deny custom resource creation, update and deletion [Conformance] + test/e2e/apimachinery/webhook.go:221 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:10:16.71 -Jul 29 16:10:16.710: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:10:16.712 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:16.746 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:16.753 -[BeforeEach] [sig-node] Downward API +STEP: Creating a kubernetes client 08/24/23 12:10:07.778 +Aug 24 12:10:07.778: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 12:10:07.781 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:07.827 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:07.833 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:166 -STEP: Creating a pod to test downward api env vars 07/29/23 16:10:16.762 -Jul 29 16:10:16.778: INFO: Waiting up to 5m0s for pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a" in namespace "downward-api-9724" to be "Succeeded or Failed" -Jul 29 16:10:16.784: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.601053ms -Jul 29 16:10:18.794: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016035641s -Jul 29 16:10:20.794: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.016522263s -Jul 29 16:10:22.795: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.016876732s -STEP: Saw pod success 07/29/23 16:10:22.795 -Jul 29 16:10:22.795: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a" satisfied condition "Succeeded or Failed" -Jul 29 16:10:22.803: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a container dapi-container: -STEP: delete the pod 07/29/23 16:10:22.822 -Jul 29 16:10:22.882: INFO: Waiting for pod downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a to disappear -Jul 29 16:10:22.889: INFO: Pod downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a no longer exists -[AfterEach] [sig-node] Downward API +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 12:10:07.868 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:10:08.879 +STEP: Deploying the webhook pod 08/24/23 12:10:08.898 +STEP: Wait for the deployment to be ready 08/24/23 12:10:08.945 +Aug 24 12:10:08.958: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +STEP: Deploying the webhook service 08/24/23 12:10:10.98 +STEP: Verifying the service has paired with the endpoint 08/24/23 12:10:11.044 +Aug 24 12:10:12.045: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should be able to deny custom resource creation, update and deletion [Conformance] + test/e2e/apimachinery/webhook.go:221 +Aug 24 12:10:12.052: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Registering the custom resource webhook via the AdmissionRegistration API 08/24/23 12:10:12.586 +Aug 24 12:10:12.623: INFO: Waiting for webhook configuration to be ready... +STEP: Creating a custom resource that should be denied by the webhook 08/24/23 12:10:12.775 +STEP: Creating a custom resource whose deletion would be denied by the webhook 08/24/23 12:10:14.933 +STEP: Updating the custom resource with disallowed data should be denied 08/24/23 12:10:14.95 +STEP: Deleting the custom resource should be denied 08/24/23 12:10:14.972 +STEP: Remove the offending key and value from the custom resource data 08/24/23 12:10:15 +STEP: Deleting the updated custom resource should be successful 08/24/23 12:10:15.031 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:22.889: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Downward API +Aug 24 12:10:15.622: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-9724" for this suite. 07/29/23 16:10:22.899 +STEP: Destroying namespace "webhook-229" for this suite. 08/24/23 12:10:15.739 +STEP: Destroying namespace "webhook-229-markers" for this suite. 
08/24/23 12:10:15.755 ------------------------------ -• [SLOW TEST] [6.242 seconds] -[sig-node] Downward API -test/e2e/common/node/framework.go:23 - should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:166 +• [SLOW TEST] [8.003 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should be able to deny custom resource creation, update and deletion [Conformance] + test/e2e/apimachinery/webhook.go:221 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Downward API + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:10:16.71 - Jul 29 16:10:16.710: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:10:16.712 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:16.746 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:16.753 - [BeforeEach] [sig-node] Downward API + STEP: Creating a kubernetes client 08/24/23 12:10:07.778 + Aug 24 12:10:07.778: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 12:10:07.781 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:07.827 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:07.833 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:166 - STEP: Creating a pod to test downward api env vars 07/29/23 16:10:16.762 - Jul 29 16:10:16.778: INFO: Waiting up to 5m0s for pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a" in namespace "downward-api-9724" to be "Succeeded or Failed" - Jul 29 16:10:16.784: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.601053ms - Jul 29 16:10:18.794: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016035641s - Jul 29 16:10:20.794: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.016522263s - Jul 29 16:10:22.795: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.016876732s - STEP: Saw pod success 07/29/23 16:10:22.795 - Jul 29 16:10:22.795: INFO: Pod "downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a" satisfied condition "Succeeded or Failed" - Jul 29 16:10:22.803: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a container dapi-container: - STEP: delete the pod 07/29/23 16:10:22.822 - Jul 29 16:10:22.882: INFO: Waiting for pod downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a to disappear - Jul 29 16:10:22.889: INFO: Pod downward-api-e3ec3b3c-11e9-4d37-8d36-71173db57e5a no longer exists - [AfterEach] [sig-node] Downward API + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 12:10:07.868 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:10:08.879 + STEP: Deploying the webhook pod 08/24/23 12:10:08.898 + STEP: Wait for the deployment to be ready 08/24/23 12:10:08.945 + Aug 24 12:10:08.958: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created + STEP: Deploying the webhook service 08/24/23 12:10:10.98 + STEP: Verifying the service has paired with the endpoint 08/24/23 12:10:11.044 + Aug 24 12:10:12.045: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should be able to deny custom resource creation, update and deletion [Conformance] + test/e2e/apimachinery/webhook.go:221 + Aug 24 12:10:12.052: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Registering the custom resource webhook via the AdmissionRegistration API 08/24/23 12:10:12.586 + Aug 24 12:10:12.623: INFO: Waiting for webhook configuration to be ready... + STEP: Creating a custom resource that should be denied by the webhook 08/24/23 12:10:12.775 + STEP: Creating a custom resource whose deletion would be denied by the webhook 08/24/23 12:10:14.933 + STEP: Updating the custom resource with disallowed data should be denied 08/24/23 12:10:14.95 + STEP: Deleting the custom resource should be denied 08/24/23 12:10:14.972 + STEP: Remove the offending key and value from the custom resource data 08/24/23 12:10:15 + STEP: Deleting the updated custom resource should be successful 08/24/23 12:10:15.031 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:22.889: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Downward API + Aug 24 12:10:15.622: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-9724" for this suite. 07/29/23 16:10:22.899 + STEP: Destroying namespace "webhook-229" for this suite. 08/24/23 12:10:15.739 + STEP: Destroying namespace "webhook-229-markers" for this suite. 
08/24/23 12:10:15.755 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-network] Ingress API - should support creating Ingress API operations [Conformance] - test/e2e/network/ingress.go:552 -[BeforeEach] [sig-network] Ingress API +[sig-storage] Downward API volume + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:249 +[BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:10:22.956 -Jul 29 16:10:22.956: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename ingress 07/29/23 16:10:22.959 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:22.987 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:22.991 -[BeforeEach] [sig-network] Ingress API +STEP: Creating a kubernetes client 08/24/23 12:10:15.801 +Aug 24 12:10:15.801: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:10:15.81 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:15.859 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:15.863 +[BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 -[It] should support creating Ingress API operations [Conformance] - test/e2e/network/ingress.go:552 -STEP: getting /apis 07/29/23 16:10:22.995 -STEP: getting /apis/networking.k8s.io 07/29/23 16:10:22.999 -STEP: getting /apis/networking.k8s.iov1 07/29/23 16:10:23.002 -STEP: creating 07/29/23 16:10:23.008 -STEP: getting 07/29/23 16:10:23.033 -STEP: listing 07/29/23 16:10:23.04 -STEP: watching 07/29/23 16:10:23.045 -Jul 29 16:10:23.045: INFO: starting watch -STEP: cluster-wide listing 07/29/23 16:10:23.046 -STEP: cluster-wide watching 07/29/23 16:10:23.052 -Jul 29 16:10:23.052: INFO: starting watch -STEP: patching 07/29/23 16:10:23.053 -STEP: updating 07/29/23 16:10:23.065 -Jul 29 16:10:23.075: INFO: waiting for watch events with expected annotations -Jul 29 16:10:23.075: INFO: saw patched and updated annotations -STEP: patching /status 07/29/23 16:10:23.075 -STEP: updating /status 07/29/23 16:10:23.083 -STEP: get /status 07/29/23 16:10:23.094 -STEP: deleting 07/29/23 16:10:23.1 -STEP: deleting a collection 07/29/23 16:10:23.118 -[AfterEach] [sig-network] Ingress API +[BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:249 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:10:15.87 +Aug 24 12:10:15.895: INFO: Waiting up to 5m0s for pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513" in namespace "downward-api-668" to be "Succeeded or Failed" +Aug 24 12:10:15.906: INFO: Pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513": Phase="Pending", Reason="", readiness=false. Elapsed: 11.44222ms +Aug 24 12:10:17.913: INFO: Pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.018472001s +Aug 24 12:10:19.915: INFO: Pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02064352s +STEP: Saw pod success 08/24/23 12:10:19.916 +Aug 24 12:10:19.916: INFO: Pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513" satisfied condition "Succeeded or Failed" +Aug 24 12:10:19.922: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513 container client-container: +STEP: delete the pod 08/24/23 12:10:19.958 +Aug 24 12:10:19.980: INFO: Waiting for pod downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513 to disappear +Aug 24 12:10:19.988: INFO: Pod downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513 no longer exists +[AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:23.142: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Ingress API +Aug 24 12:10:19.989: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Ingress API +[DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Ingress API +[DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 -STEP: Destroying namespace "ingress-4095" for this suite. 07/29/23 16:10:23.154 +STEP: Destroying namespace "downward-api-668" for this suite. 08/24/23 12:10:20.002 ------------------------------ -• [0.212 seconds] -[sig-network] Ingress API -test/e2e/network/common/framework.go:23 - should support creating Ingress API operations [Conformance] - test/e2e/network/ingress.go:552 +• [4.217 seconds] +[sig-storage] Downward API volume +test/e2e/common/storage/framework.go:23 + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:249 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Ingress API + [BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:10:22.956 - Jul 29 16:10:22.956: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename ingress 07/29/23 16:10:22.959 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:22.987 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:22.991 - [BeforeEach] [sig-network] Ingress API + STEP: Creating a kubernetes client 08/24/23 12:10:15.801 + Aug 24 12:10:15.801: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 12:10:15.81 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:15.859 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:15.863 + [BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 - [It] should support creating Ingress API operations [Conformance] - test/e2e/network/ingress.go:552 - STEP: getting /apis 07/29/23 16:10:22.995 - STEP: getting /apis/networking.k8s.io 07/29/23 16:10:22.999 - STEP: getting /apis/networking.k8s.iov1 07/29/23 16:10:23.002 - STEP: creating 07/29/23 16:10:23.008 - STEP: getting 07/29/23 
16:10:23.033 - STEP: listing 07/29/23 16:10:23.04 - STEP: watching 07/29/23 16:10:23.045 - Jul 29 16:10:23.045: INFO: starting watch - STEP: cluster-wide listing 07/29/23 16:10:23.046 - STEP: cluster-wide watching 07/29/23 16:10:23.052 - Jul 29 16:10:23.052: INFO: starting watch - STEP: patching 07/29/23 16:10:23.053 - STEP: updating 07/29/23 16:10:23.065 - Jul 29 16:10:23.075: INFO: waiting for watch events with expected annotations - Jul 29 16:10:23.075: INFO: saw patched and updated annotations - STEP: patching /status 07/29/23 16:10:23.075 - STEP: updating /status 07/29/23 16:10:23.083 - STEP: get /status 07/29/23 16:10:23.094 - STEP: deleting 07/29/23 16:10:23.1 - STEP: deleting a collection 07/29/23 16:10:23.118 - [AfterEach] [sig-network] Ingress API + [BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 + [It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:249 + STEP: Creating a pod to test downward API volume plugin 08/24/23 12:10:15.87 + Aug 24 12:10:15.895: INFO: Waiting up to 5m0s for pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513" in namespace "downward-api-668" to be "Succeeded or Failed" + Aug 24 12:10:15.906: INFO: Pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513": Phase="Pending", Reason="", readiness=false. Elapsed: 11.44222ms + Aug 24 12:10:17.913: INFO: Pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018472001s + Aug 24 12:10:19.915: INFO: Pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02064352s + STEP: Saw pod success 08/24/23 12:10:19.916 + Aug 24 12:10:19.916: INFO: Pod "downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513" satisfied condition "Succeeded or Failed" + Aug 24 12:10:19.922: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513 container client-container: + STEP: delete the pod 08/24/23 12:10:19.958 + Aug 24 12:10:19.980: INFO: Waiting for pod downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513 to disappear + Aug 24 12:10:19.988: INFO: Pod downwardapi-volume-6e77840e-5db4-4799-86aa-6928176a6513 no longer exists + [AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:23.142: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Ingress API + Aug 24 12:10:19.989: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Ingress API + [DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Ingress API + [DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 - STEP: Destroying namespace "ingress-4095" for this suite. 07/29/23 16:10:23.154 + STEP: Destroying namespace "downward-api-668" for this suite. 08/24/23 12:10:20.002 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSS +SSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a pod. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:230 -[BeforeEach] [sig-api-machinery] ResourceQuota +[sig-storage] Projected downwardAPI + should update labels on modification [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:130 +[BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:10:23.169 -Jul 29 16:10:23.169: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 16:10:23.176 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:23.209 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:23.214 -[BeforeEach] [sig-api-machinery] ResourceQuota +STEP: Creating a kubernetes client 08/24/23 12:10:20.019 +Aug 24 12:10:20.019: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:10:20.021 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:20.054 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:20.058 +[BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 -[It] should create a ResourceQuota and capture the life of a pod. [Conformance] - test/e2e/apimachinery/resource_quota.go:230 -STEP: Counting existing ResourceQuota 07/29/23 16:10:23.22 -STEP: Creating a ResourceQuota 07/29/23 16:10:28.229 -STEP: Ensuring resource quota status is calculated 07/29/23 16:10:28.241 -STEP: Creating a Pod that fits quota 07/29/23 16:10:30.249 -STEP: Ensuring ResourceQuota status captures the pod usage 07/29/23 16:10:30.27 -STEP: Not allowing a pod to be created that exceeds remaining quota 07/29/23 16:10:32.278 -STEP: Not allowing a pod to be created that exceeds remaining quota(validation on extended resources) 07/29/23 16:10:32.282 -STEP: Ensuring a pod cannot update its resource requirements 07/29/23 16:10:32.286 -STEP: Ensuring attempts to update pod resource requirements did not change quota usage 07/29/23 16:10:32.291 -STEP: Deleting the pod 07/29/23 16:10:34.303 -STEP: Ensuring resource quota status released the pod usage 07/29/23 16:10:34.339 -[AfterEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 +[It] should update labels on modification [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:130 +STEP: Creating the pod 08/24/23 12:10:20.064 +Aug 24 12:10:20.087: INFO: Waiting up to 5m0s for pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774" in namespace "projected-2961" to be "running and ready" +Aug 24 12:10:20.107: INFO: Pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774": Phase="Pending", Reason="", readiness=false. Elapsed: 19.376797ms +Aug 24 12:10:20.107: INFO: The phase of Pod labelsupdateff6228dd-9f6f-461d-9375-14130cdad774 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:10:22.114: INFO: Pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.026925453s +Aug 24 12:10:22.115: INFO: The phase of Pod labelsupdateff6228dd-9f6f-461d-9375-14130cdad774 is Running (Ready = true) +Aug 24 12:10:22.115: INFO: Pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774" satisfied condition "running and ready" +Aug 24 12:10:22.658: INFO: Successfully updated pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774" +[AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:36.349: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 12:10:26.709: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-9024" for this suite. 07/29/23 16:10:36.357 +STEP: Destroying namespace "projected-2961" for this suite. 08/24/23 12:10:26.721 ------------------------------ -• [SLOW TEST] [13.203 seconds] -[sig-api-machinery] ResourceQuota -test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a pod. [Conformance] - test/e2e/apimachinery/resource_quota.go:230 +• [SLOW TEST] [6.715 seconds] +[sig-storage] Projected downwardAPI +test/e2e/common/storage/framework.go:23 + should update labels on modification [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:130 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:10:23.169 - Jul 29 16:10:23.169: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 16:10:23.176 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:23.209 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:23.214 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 12:10:20.019 + Aug 24 12:10:20.019: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:10:20.021 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:20.054 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:20.058 + [BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 - [It] should create a ResourceQuota and capture the life of a pod. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:230 - STEP: Counting existing ResourceQuota 07/29/23 16:10:23.22 - STEP: Creating a ResourceQuota 07/29/23 16:10:28.229 - STEP: Ensuring resource quota status is calculated 07/29/23 16:10:28.241 - STEP: Creating a Pod that fits quota 07/29/23 16:10:30.249 - STEP: Ensuring ResourceQuota status captures the pod usage 07/29/23 16:10:30.27 - STEP: Not allowing a pod to be created that exceeds remaining quota 07/29/23 16:10:32.278 - STEP: Not allowing a pod to be created that exceeds remaining quota(validation on extended resources) 07/29/23 16:10:32.282 - STEP: Ensuring a pod cannot update its resource requirements 07/29/23 16:10:32.286 - STEP: Ensuring attempts to update pod resource requirements did not change quota usage 07/29/23 16:10:32.291 - STEP: Deleting the pod 07/29/23 16:10:34.303 - STEP: Ensuring resource quota status released the pod usage 07/29/23 16:10:34.339 - [AfterEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 + [It] should update labels on modification [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:130 + STEP: Creating the pod 08/24/23 12:10:20.064 + Aug 24 12:10:20.087: INFO: Waiting up to 5m0s for pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774" in namespace "projected-2961" to be "running and ready" + Aug 24 12:10:20.107: INFO: Pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774": Phase="Pending", Reason="", readiness=false. Elapsed: 19.376797ms + Aug 24 12:10:20.107: INFO: The phase of Pod labelsupdateff6228dd-9f6f-461d-9375-14130cdad774 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:10:22.114: INFO: Pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774": Phase="Running", Reason="", readiness=true. Elapsed: 2.026925453s + Aug 24 12:10:22.115: INFO: The phase of Pod labelsupdateff6228dd-9f6f-461d-9375-14130cdad774 is Running (Ready = true) + Aug 24 12:10:22.115: INFO: Pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774" satisfied condition "running and ready" + Aug 24 12:10:22.658: INFO: Successfully updated pod "labelsupdateff6228dd-9f6f-461d-9375-14130cdad774" + [AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:36.349: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 12:10:26.709: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-9024" for this suite. 07/29/23 16:10:36.357 + STEP: Destroying namespace "projected-2961" for this suite. 
08/24/23 12:10:26.721 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSS +SSSSSSSSS ------------------------------ -[sig-network] Service endpoints latency - should not be very high [Conformance] - test/e2e/network/service_latency.go:59 -[BeforeEach] [sig-network] Service endpoints latency +[sig-storage] Projected configMap + should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:47 +[BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:10:36.377 -Jul 29 16:10:36.377: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename svc-latency 07/29/23 16:10:36.38 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:36.41 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:36.415 -[BeforeEach] [sig-network] Service endpoints latency +STEP: Creating a kubernetes client 08/24/23 12:10:26.737 +Aug 24 12:10:26.737: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:10:26.738 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:26.78 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:26.785 +[BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 -[It] should not be very high [Conformance] - test/e2e/network/service_latency.go:59 -Jul 29 16:10:36.419: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: creating replication controller svc-latency-rc in namespace svc-latency-3279 07/29/23 16:10:36.421 -I0729 16:10:36.430415 13 runners.go:193] Created replication controller with name: svc-latency-rc, namespace: svc-latency-3279, replica count: 1 -I0729 16:10:37.482463 13 runners.go:193] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -I0729 16:10:38.483428 13 runners.go:193] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Jul 29 16:10:38.606: INFO: Created: latency-svc-rt8wc -Jul 29 16:10:38.618: INFO: Got endpoints: latency-svc-rt8wc [34.088869ms] -Jul 29 16:10:38.644: INFO: Created: latency-svc-dnkrh -Jul 29 16:10:38.658: INFO: Created: latency-svc-gw4bw -Jul 29 16:10:38.668: INFO: Created: latency-svc-ghlcb -Jul 29 16:10:38.666: INFO: Got endpoints: latency-svc-dnkrh [45.293187ms] -Jul 29 16:10:38.685: INFO: Got endpoints: latency-svc-gw4bw [63.71407ms] -Jul 29 16:10:38.686: INFO: Created: latency-svc-fw2xb -Jul 29 16:10:38.708: INFO: Created: latency-svc-tx6f2 -Jul 29 16:10:38.715: INFO: Created: latency-svc-bqmnn -Jul 29 16:10:38.716: INFO: Got endpoints: latency-svc-ghlcb [95.000356ms] -Jul 29 16:10:38.725: INFO: Created: latency-svc-zfdrb -Jul 29 16:10:38.758: INFO: Created: latency-svc-mbnq7 -Jul 29 16:10:38.760: INFO: Created: latency-svc-dn48k -Jul 29 16:10:38.770: INFO: Created: latency-svc-vwvwg -Jul 29 16:10:38.784: INFO: Created: latency-svc-jhkxs -Jul 29 16:10:38.790: INFO: Created: latency-svc-kjm46 -Jul 29 16:10:38.806: INFO: Created: latency-svc-6b28c -Jul 29 16:10:38.814: INFO: Created: latency-svc-5z9wz -Jul 29 16:10:38.820: INFO: Got endpoints: latency-svc-fw2xb [198.100372ms] -Jul 29 16:10:38.830: INFO: Got endpoints: latency-svc-bqmnn [207.450406ms] -Jul 29 
16:10:38.840: INFO: Created: latency-svc-t8mlx -Jul 29 16:10:38.841: INFO: Got endpoints: latency-svc-mbnq7 [218.539002ms] -Jul 29 16:10:38.841: INFO: Got endpoints: latency-svc-tx6f2 [218.089412ms] -Jul 29 16:10:38.842: INFO: Got endpoints: latency-svc-zfdrb [218.923806ms] -Jul 29 16:10:38.865: INFO: Created: latency-svc-9sjwc -Jul 29 16:10:38.884: INFO: Created: latency-svc-jp2j9 -Jul 29 16:10:38.924: INFO: Created: latency-svc-h6vcw -Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-vwvwg [300.636657ms] -Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-dn48k [301.122606ms] -Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-jhkxs [301.803559ms] -Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-kjm46 [302.040457ms] -Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-6b28c [301.920184ms] -Jul 29 16:10:38.939: INFO: Got endpoints: latency-svc-t8mlx [315.175295ms] -Jul 29 16:10:38.949: INFO: Got endpoints: latency-svc-9sjwc [281.276257ms] -Jul 29 16:10:38.951: INFO: Got endpoints: latency-svc-5z9wz [326.761641ms] -Jul 29 16:10:38.955: INFO: Created: latency-svc-k8zp7 -Jul 29 16:10:38.961: INFO: Got endpoints: latency-svc-jp2j9 [274.950142ms] -Jul 29 16:10:38.961: INFO: Got endpoints: latency-svc-h6vcw [244.954464ms] -Jul 29 16:10:38.974: INFO: Got endpoints: latency-svc-k8zp7 [154.147783ms] -Jul 29 16:10:38.975: INFO: Created: latency-svc-8fd2b -Jul 29 16:10:39.002: INFO: Created: latency-svc-t6nbb -Jul 29 16:10:39.013: INFO: Got endpoints: latency-svc-8fd2b [182.288048ms] -Jul 29 16:10:39.022: INFO: Got endpoints: latency-svc-t6nbb [181.154753ms] -Jul 29 16:10:39.024: INFO: Created: latency-svc-zvw47 -Jul 29 16:10:39.045: INFO: Got endpoints: latency-svc-zvw47 [203.087744ms] -Jul 29 16:10:39.049: INFO: Created: latency-svc-k8glb -Jul 29 16:10:39.066: INFO: Got endpoints: latency-svc-k8glb [223.759221ms] -Jul 29 16:10:39.070: INFO: Created: latency-svc-4k6rj -Jul 29 16:10:39.087: INFO: Got endpoints: latency-svc-4k6rj [162.173887ms] -Jul 29 16:10:39.094: INFO: Created: latency-svc-kgzfj -Jul 29 16:10:39.101: INFO: Got endpoints: latency-svc-kgzfj [176.404163ms] -Jul 29 16:10:39.109: INFO: Created: latency-svc-v4pxv -Jul 29 16:10:39.118: INFO: Got endpoints: latency-svc-v4pxv [190.26221ms] -Jul 29 16:10:39.120: INFO: Created: latency-svc-fr6px -Jul 29 16:10:39.136: INFO: Got endpoints: latency-svc-fr6px [207.50357ms] -Jul 29 16:10:39.147: INFO: Created: latency-svc-9sfvq -Jul 29 16:10:39.161: INFO: Got endpoints: latency-svc-9sfvq [231.834458ms] -Jul 29 16:10:39.164: INFO: Created: latency-svc-mgcht -Jul 29 16:10:39.176: INFO: Got endpoints: latency-svc-mgcht [236.682819ms] -Jul 29 16:10:39.177: INFO: Created: latency-svc-m9c6t -Jul 29 16:10:39.191: INFO: Got endpoints: latency-svc-m9c6t [241.426418ms] -Jul 29 16:10:39.204: INFO: Created: latency-svc-kkn2x -Jul 29 16:10:39.217: INFO: Got endpoints: latency-svc-kkn2x [266.180365ms] -Jul 29 16:10:39.226: INFO: Created: latency-svc-hlxnm -Jul 29 16:10:39.238: INFO: Created: latency-svc-xqf6x -Jul 29 16:10:39.243: INFO: Got endpoints: latency-svc-hlxnm [281.794835ms] -Jul 29 16:10:39.258: INFO: Created: latency-svc-xbvtm -Jul 29 16:10:39.266: INFO: Got endpoints: latency-svc-xqf6x [304.841131ms] -Jul 29 16:10:39.281: INFO: Got endpoints: latency-svc-xbvtm [306.641761ms] -Jul 29 16:10:39.281: INFO: Created: latency-svc-4jdj2 -Jul 29 16:10:39.293: INFO: Created: latency-svc-trz45 -Jul 29 16:10:39.297: INFO: Got endpoints: latency-svc-4jdj2 [283.811802ms] -Jul 29 16:10:39.308: INFO: Got endpoints: latency-svc-trz45 
[286.289579ms] -Jul 29 16:10:39.317: INFO: Created: latency-svc-7h424 -Jul 29 16:10:39.337: INFO: Created: latency-svc-kqvg7 -Jul 29 16:10:39.340: INFO: Got endpoints: latency-svc-7h424 [294.996983ms] -Jul 29 16:10:39.353: INFO: Got endpoints: latency-svc-kqvg7 [286.7499ms] -Jul 29 16:10:39.370: INFO: Created: latency-svc-dlbtc -Jul 29 16:10:39.374: INFO: Got endpoints: latency-svc-dlbtc [287.2344ms] -Jul 29 16:10:39.384: INFO: Created: latency-svc-hpsst -Jul 29 16:10:39.393: INFO: Created: latency-svc-2st8c -Jul 29 16:10:39.402: INFO: Created: latency-svc-7z68h -Jul 29 16:10:39.413: INFO: Got endpoints: latency-svc-hpsst [311.716002ms] -Jul 29 16:10:39.413: INFO: Created: latency-svc-6gzft -Jul 29 16:10:39.422: INFO: Created: latency-svc-42sql -Jul 29 16:10:39.434: INFO: Created: latency-svc-n7db2 -Jul 29 16:10:39.450: INFO: Got endpoints: latency-svc-2st8c [331.635634ms] -Jul 29 16:10:39.455: INFO: Created: latency-svc-s47cm -Jul 29 16:10:39.457: INFO: Got endpoints: latency-svc-7z68h [320.657876ms] -Jul 29 16:10:39.468: INFO: Got endpoints: latency-svc-6gzft [307.138114ms] -Jul 29 16:10:39.481: INFO: Got endpoints: latency-svc-n7db2 [290.060239ms] -Jul 29 16:10:39.482: INFO: Got endpoints: latency-svc-42sql [306.296716ms] -Jul 29 16:10:39.497: INFO: Got endpoints: latency-svc-s47cm [280.026035ms] -Jul 29 16:10:39.513: INFO: Created: latency-svc-rp9mm -Jul 29 16:10:39.518: INFO: Created: latency-svc-l987h -Jul 29 16:10:39.534: INFO: Got endpoints: latency-svc-rp9mm [291.303258ms] -Jul 29 16:10:39.540: INFO: Created: latency-svc-sv4mv -Jul 29 16:10:39.559: INFO: Created: latency-svc-7kgqx -Jul 29 16:10:39.569: INFO: Created: latency-svc-vqj99 -Jul 29 16:10:39.585: INFO: Got endpoints: latency-svc-l987h [318.786604ms] -Jul 29 16:10:39.587: INFO: Created: latency-svc-grxkm -Jul 29 16:10:39.598: INFO: Created: latency-svc-fmgw8 -Jul 29 16:10:39.614: INFO: Created: latency-svc-wsr8n -Jul 29 16:10:39.619: INFO: Got endpoints: latency-svc-sv4mv [337.015961ms] -Jul 29 16:10:39.647: INFO: Created: latency-svc-d57rn -Jul 29 16:10:39.672: INFO: Created: latency-svc-xw6b5 -Jul 29 16:10:39.678: INFO: Got endpoints: latency-svc-7kgqx [381.295511ms] -Jul 29 16:10:39.689: INFO: Created: latency-svc-k6548 -Jul 29 16:10:39.703: INFO: Created: latency-svc-nn4xm -Jul 29 16:10:39.721: INFO: Created: latency-svc-g2lkz -Jul 29 16:10:39.736: INFO: Got endpoints: latency-svc-vqj99 [427.375016ms] -Jul 29 16:10:39.770: INFO: Got endpoints: latency-svc-grxkm [430.046281ms] -Jul 29 16:10:39.783: INFO: Created: latency-svc-bjr69 -Jul 29 16:10:39.787: INFO: Created: latency-svc-6klxg -Jul 29 16:10:39.814: INFO: Created: latency-svc-f4lts -Jul 29 16:10:39.828: INFO: Got endpoints: latency-svc-fmgw8 [475.564726ms] -Jul 29 16:10:39.837: INFO: Created: latency-svc-f896d -Jul 29 16:10:39.847: INFO: Created: latency-svc-d4wp2 -Jul 29 16:10:39.868: INFO: Created: latency-svc-npm2c -Jul 29 16:10:39.868: INFO: Got endpoints: latency-svc-wsr8n [493.506093ms] -Jul 29 16:10:39.879: INFO: Created: latency-svc-w9xmx -Jul 29 16:10:39.916: INFO: Created: latency-svc-9xwx7 -Jul 29 16:10:39.930: INFO: Created: latency-svc-4bhhp -Jul 29 16:10:39.930: INFO: Created: latency-svc-nw4bz -Jul 29 16:10:39.976: INFO: Got endpoints: latency-svc-d57rn [563.629522ms] -Jul 29 16:10:39.991: INFO: Got endpoints: latency-svc-xw6b5 [540.582421ms] -Jul 29 16:10:40.012: INFO: Created: latency-svc-fks87 -Jul 29 16:10:40.021: INFO: Got endpoints: latency-svc-k6548 [563.263459ms] -Jul 29 16:10:40.036: INFO: Created: latency-svc-4tfjf -Jul 29 16:10:40.045: 
INFO: Created: latency-svc-tkcfq -Jul 29 16:10:40.068: INFO: Got endpoints: latency-svc-nn4xm [599.898692ms] -Jul 29 16:10:40.091: INFO: Created: latency-svc-dlljb -Jul 29 16:10:40.140: INFO: Got endpoints: latency-svc-g2lkz [658.279346ms] -Jul 29 16:10:40.162: INFO: Created: latency-svc-mpnzg -Jul 29 16:10:40.203: INFO: Got endpoints: latency-svc-bjr69 [721.578222ms] -Jul 29 16:10:40.219: INFO: Created: latency-svc-hv6mt -Jul 29 16:10:40.226: INFO: Got endpoints: latency-svc-6klxg [727.509847ms] -Jul 29 16:10:40.246: INFO: Created: latency-svc-nc5zz -Jul 29 16:10:40.266: INFO: Got endpoints: latency-svc-f4lts [731.694053ms] -Jul 29 16:10:40.287: INFO: Created: latency-svc-kxx5j -Jul 29 16:10:40.318: INFO: Got endpoints: latency-svc-f896d [698.874506ms] -Jul 29 16:10:40.336: INFO: Created: latency-svc-c8hlc -Jul 29 16:10:40.363: INFO: Got endpoints: latency-svc-d4wp2 [684.784551ms] -Jul 29 16:10:40.381: INFO: Created: latency-svc-fq89x -Jul 29 16:10:40.414: INFO: Got endpoints: latency-svc-npm2c [678.001074ms] -Jul 29 16:10:40.432: INFO: Created: latency-svc-kkjwl -Jul 29 16:10:40.466: INFO: Got endpoints: latency-svc-w9xmx [695.318471ms] -Jul 29 16:10:40.481: INFO: Created: latency-svc-m98pz -Jul 29 16:10:40.515: INFO: Got endpoints: latency-svc-4bhhp [927.355533ms] -Jul 29 16:10:40.534: INFO: Created: latency-svc-q89c4 -Jul 29 16:10:40.570: INFO: Got endpoints: latency-svc-9xwx7 [741.11959ms] -Jul 29 16:10:40.589: INFO: Created: latency-svc-nqvp8 -Jul 29 16:10:40.615: INFO: Got endpoints: latency-svc-nw4bz [746.809035ms] -Jul 29 16:10:40.646: INFO: Created: latency-svc-npl58 -Jul 29 16:10:40.669: INFO: Got endpoints: latency-svc-fks87 [692.195817ms] -Jul 29 16:10:40.687: INFO: Created: latency-svc-nb4c8 -Jul 29 16:10:40.715: INFO: Got endpoints: latency-svc-4tfjf [723.773406ms] -Jul 29 16:10:40.746: INFO: Created: latency-svc-pcfhv -Jul 29 16:10:40.767: INFO: Got endpoints: latency-svc-tkcfq [745.730729ms] -Jul 29 16:10:40.792: INFO: Created: latency-svc-g8mxm -Jul 29 16:10:40.815: INFO: Got endpoints: latency-svc-dlljb [747.122685ms] -Jul 29 16:10:40.838: INFO: Created: latency-svc-srjvk -Jul 29 16:10:40.868: INFO: Got endpoints: latency-svc-mpnzg [727.488471ms] -Jul 29 16:10:40.890: INFO: Created: latency-svc-crs9v -Jul 29 16:10:40.913: INFO: Got endpoints: latency-svc-hv6mt [710.63572ms] -Jul 29 16:10:40.929: INFO: Created: latency-svc-26pgg -Jul 29 16:10:40.970: INFO: Got endpoints: latency-svc-nc5zz [743.973448ms] -Jul 29 16:10:40.992: INFO: Created: latency-svc-4dzhg -Jul 29 16:10:41.014: INFO: Got endpoints: latency-svc-kxx5j [747.683294ms] -Jul 29 16:10:41.037: INFO: Created: latency-svc-m5kdh -Jul 29 16:10:41.066: INFO: Got endpoints: latency-svc-c8hlc [747.591797ms] -Jul 29 16:10:41.088: INFO: Created: latency-svc-7rp7p -Jul 29 16:10:41.120: INFO: Got endpoints: latency-svc-fq89x [756.022737ms] -Jul 29 16:10:41.141: INFO: Created: latency-svc-whp27 -Jul 29 16:10:41.165: INFO: Got endpoints: latency-svc-kkjwl [750.399179ms] -Jul 29 16:10:41.192: INFO: Created: latency-svc-8cwkh -Jul 29 16:10:41.216: INFO: Got endpoints: latency-svc-m98pz [750.303709ms] -Jul 29 16:10:41.233: INFO: Created: latency-svc-p2dht -Jul 29 16:10:41.266: INFO: Got endpoints: latency-svc-q89c4 [751.523012ms] -Jul 29 16:10:41.285: INFO: Created: latency-svc-9hvpd -Jul 29 16:10:41.318: INFO: Got endpoints: latency-svc-nqvp8 [748.690434ms] -Jul 29 16:10:41.348: INFO: Created: latency-svc-dgzfx -Jul 29 16:10:41.384: INFO: Got endpoints: latency-svc-npl58 [768.866375ms] -Jul 29 16:10:41.406: INFO: Created: 
latency-svc-64qh5 -Jul 29 16:10:41.414: INFO: Got endpoints: latency-svc-nb4c8 [744.819271ms] -Jul 29 16:10:41.437: INFO: Created: latency-svc-89wdv -Jul 29 16:10:41.463: INFO: Got endpoints: latency-svc-pcfhv [747.772804ms] -Jul 29 16:10:41.481: INFO: Created: latency-svc-77pwh -Jul 29 16:10:41.513: INFO: Got endpoints: latency-svc-g8mxm [746.738115ms] -Jul 29 16:10:41.534: INFO: Created: latency-svc-5jkcn -Jul 29 16:10:41.564: INFO: Got endpoints: latency-svc-srjvk [748.853441ms] -Jul 29 16:10:41.583: INFO: Created: latency-svc-5cwvn -Jul 29 16:10:41.614: INFO: Got endpoints: latency-svc-crs9v [745.860649ms] -Jul 29 16:10:41.644: INFO: Created: latency-svc-pkgn2 -Jul 29 16:10:41.665: INFO: Got endpoints: latency-svc-26pgg [751.553233ms] -Jul 29 16:10:41.690: INFO: Created: latency-svc-c72wz -Jul 29 16:10:41.718: INFO: Got endpoints: latency-svc-4dzhg [747.752197ms] -Jul 29 16:10:41.753: INFO: Created: latency-svc-wgvrb -Jul 29 16:10:41.781: INFO: Got endpoints: latency-svc-m5kdh [766.044481ms] -Jul 29 16:10:41.810: INFO: Created: latency-svc-zwq56 -Jul 29 16:10:41.813: INFO: Got endpoints: latency-svc-7rp7p [747.150777ms] -Jul 29 16:10:41.846: INFO: Created: latency-svc-czbsb -Jul 29 16:10:41.876: INFO: Got endpoints: latency-svc-whp27 [755.706697ms] -Jul 29 16:10:41.898: INFO: Created: latency-svc-gcdtj -Jul 29 16:10:41.916: INFO: Got endpoints: latency-svc-8cwkh [750.175416ms] -Jul 29 16:10:41.935: INFO: Created: latency-svc-crxbl -Jul 29 16:10:41.971: INFO: Got endpoints: latency-svc-p2dht [754.429243ms] -Jul 29 16:10:41.990: INFO: Created: latency-svc-9lw9q -Jul 29 16:10:42.017: INFO: Got endpoints: latency-svc-9hvpd [749.993868ms] -Jul 29 16:10:42.033: INFO: Created: latency-svc-vprtm -Jul 29 16:10:42.075: INFO: Got endpoints: latency-svc-dgzfx [756.349375ms] -Jul 29 16:10:42.094: INFO: Created: latency-svc-zzllh -Jul 29 16:10:42.123: INFO: Got endpoints: latency-svc-64qh5 [738.518021ms] -Jul 29 16:10:42.145: INFO: Created: latency-svc-qbk82 -Jul 29 16:10:42.167: INFO: Got endpoints: latency-svc-89wdv [752.86481ms] -Jul 29 16:10:42.198: INFO: Created: latency-svc-vznr5 -Jul 29 16:10:42.218: INFO: Got endpoints: latency-svc-77pwh [754.101072ms] -Jul 29 16:10:42.244: INFO: Created: latency-svc-h2nfs -Jul 29 16:10:42.274: INFO: Got endpoints: latency-svc-5jkcn [760.187244ms] -Jul 29 16:10:42.293: INFO: Created: latency-svc-q4chv -Jul 29 16:10:42.323: INFO: Got endpoints: latency-svc-5cwvn [759.24957ms] -Jul 29 16:10:42.354: INFO: Created: latency-svc-svqlj -Jul 29 16:10:42.366: INFO: Got endpoints: latency-svc-pkgn2 [751.821217ms] -Jul 29 16:10:42.385: INFO: Created: latency-svc-gwfj8 -Jul 29 16:10:42.415: INFO: Got endpoints: latency-svc-c72wz [748.926669ms] -Jul 29 16:10:42.432: INFO: Created: latency-svc-ww5dd -Jul 29 16:10:42.469: INFO: Got endpoints: latency-svc-wgvrb [749.597859ms] -Jul 29 16:10:42.485: INFO: Created: latency-svc-zz4hw -Jul 29 16:10:42.521: INFO: Got endpoints: latency-svc-zwq56 [739.856394ms] -Jul 29 16:10:42.539: INFO: Created: latency-svc-gc856 -Jul 29 16:10:42.564: INFO: Got endpoints: latency-svc-czbsb [750.936543ms] -Jul 29 16:10:42.580: INFO: Created: latency-svc-vkhnm -Jul 29 16:10:42.620: INFO: Got endpoints: latency-svc-gcdtj [742.46766ms] -Jul 29 16:10:42.641: INFO: Created: latency-svc-xlj99 -Jul 29 16:10:42.666: INFO: Got endpoints: latency-svc-crxbl [749.78388ms] -Jul 29 16:10:42.685: INFO: Created: latency-svc-2x6bm -Jul 29 16:10:42.714: INFO: Got endpoints: latency-svc-9lw9q [742.590705ms] -Jul 29 16:10:42.733: INFO: Created: latency-svc-64rl5 
-Jul 29 16:10:42.766: INFO: Got endpoints: latency-svc-vprtm [748.703636ms] -Jul 29 16:10:42.784: INFO: Created: latency-svc-5rgf8 -Jul 29 16:10:42.817: INFO: Got endpoints: latency-svc-zzllh [741.875604ms] -Jul 29 16:10:42.846: INFO: Created: latency-svc-74h9w -Jul 29 16:10:42.868: INFO: Got endpoints: latency-svc-qbk82 [745.315042ms] -Jul 29 16:10:42.892: INFO: Created: latency-svc-v2jwp -Jul 29 16:10:42.921: INFO: Got endpoints: latency-svc-vznr5 [754.05164ms] -Jul 29 16:10:43.003: INFO: Got endpoints: latency-svc-h2nfs [784.732369ms] -Jul 29 16:10:43.008: INFO: Created: latency-svc-xj6dt -Jul 29 16:10:43.022: INFO: Got endpoints: latency-svc-q4chv [747.891813ms] -Jul 29 16:10:43.041: INFO: Created: latency-svc-rxswh -Jul 29 16:10:43.059: INFO: Created: latency-svc-j6qlh -Jul 29 16:10:43.063: INFO: Got endpoints: latency-svc-svqlj [739.369469ms] -Jul 29 16:10:43.077: INFO: Created: latency-svc-46hcv -Jul 29 16:10:43.113: INFO: Got endpoints: latency-svc-gwfj8 [747.16723ms] -Jul 29 16:10:43.135: INFO: Created: latency-svc-7bn94 -Jul 29 16:10:43.163: INFO: Got endpoints: latency-svc-ww5dd [747.899981ms] -Jul 29 16:10:43.178: INFO: Created: latency-svc-kpzkz -Jul 29 16:10:43.213: INFO: Got endpoints: latency-svc-zz4hw [744.409124ms] -Jul 29 16:10:43.239: INFO: Created: latency-svc-hrld7 -Jul 29 16:10:43.263: INFO: Got endpoints: latency-svc-gc856 [741.215149ms] -Jul 29 16:10:43.282: INFO: Created: latency-svc-bfv2n -Jul 29 16:10:43.319: INFO: Got endpoints: latency-svc-vkhnm [754.607951ms] -Jul 29 16:10:43.346: INFO: Created: latency-svc-rbbp5 -Jul 29 16:10:43.367: INFO: Got endpoints: latency-svc-xlj99 [747.747872ms] -Jul 29 16:10:43.388: INFO: Created: latency-svc-76p4n -Jul 29 16:10:43.420: INFO: Got endpoints: latency-svc-2x6bm [754.305473ms] -Jul 29 16:10:43.445: INFO: Created: latency-svc-lj4b2 -Jul 29 16:10:43.468: INFO: Got endpoints: latency-svc-64rl5 [753.639619ms] -Jul 29 16:10:43.486: INFO: Created: latency-svc-bdl2g -Jul 29 16:10:43.514: INFO: Got endpoints: latency-svc-5rgf8 [747.801709ms] -Jul 29 16:10:43.535: INFO: Created: latency-svc-nl8lc -Jul 29 16:10:43.571: INFO: Got endpoints: latency-svc-74h9w [752.801909ms] -Jul 29 16:10:43.595: INFO: Created: latency-svc-84426 -Jul 29 16:10:43.639: INFO: Got endpoints: latency-svc-v2jwp [770.559758ms] -Jul 29 16:10:43.680: INFO: Created: latency-svc-hbbh8 -Jul 29 16:10:43.684: INFO: Got endpoints: latency-svc-xj6dt [762.393572ms] -Jul 29 16:10:43.727: INFO: Got endpoints: latency-svc-rxswh [724.581012ms] -Jul 29 16:10:43.757: INFO: Created: latency-svc-2z6gm -Jul 29 16:10:43.773: INFO: Got endpoints: latency-svc-j6qlh [751.186251ms] -Jul 29 16:10:43.800: INFO: Created: latency-svc-vglcd -Jul 29 16:10:43.802: INFO: Created: latency-svc-c8bqk -Jul 29 16:10:43.823: INFO: Got endpoints: latency-svc-46hcv [759.778009ms] -Jul 29 16:10:43.843: INFO: Created: latency-svc-w2m2z -Jul 29 16:10:43.865: INFO: Got endpoints: latency-svc-7bn94 [750.627728ms] -Jul 29 16:10:43.921: INFO: Created: latency-svc-ss6fl -Jul 29 16:10:43.921: INFO: Got endpoints: latency-svc-kpzkz [758.074435ms] -Jul 29 16:10:43.953: INFO: Created: latency-svc-r8xpg -Jul 29 16:10:43.963: INFO: Got endpoints: latency-svc-hrld7 [750.274169ms] -Jul 29 16:10:43.982: INFO: Created: latency-svc-7lbt9 -Jul 29 16:10:44.023: INFO: Got endpoints: latency-svc-bfv2n [760.307784ms] -Jul 29 16:10:44.039: INFO: Created: latency-svc-c66dv -Jul 29 16:10:44.065: INFO: Got endpoints: latency-svc-rbbp5 [746.279828ms] -Jul 29 16:10:44.081: INFO: Created: latency-svc-bc6sq -Jul 29 
16:10:44.115: INFO: Got endpoints: latency-svc-76p4n [747.343217ms] -Jul 29 16:10:44.133: INFO: Created: latency-svc-blqhz -Jul 29 16:10:44.166: INFO: Got endpoints: latency-svc-lj4b2 [745.736755ms] -Jul 29 16:10:44.185: INFO: Created: latency-svc-fkxxt -Jul 29 16:10:44.216: INFO: Got endpoints: latency-svc-bdl2g [748.262276ms] -Jul 29 16:10:44.237: INFO: Created: latency-svc-hdn26 -Jul 29 16:10:44.266: INFO: Got endpoints: latency-svc-nl8lc [751.596498ms] -Jul 29 16:10:44.302: INFO: Created: latency-svc-j2bp5 -Jul 29 16:10:44.321: INFO: Got endpoints: latency-svc-84426 [749.606516ms] -Jul 29 16:10:44.343: INFO: Created: latency-svc-tbpxg -Jul 29 16:10:44.371: INFO: Got endpoints: latency-svc-hbbh8 [731.818111ms] -Jul 29 16:10:44.390: INFO: Created: latency-svc-jtp8k -Jul 29 16:10:44.415: INFO: Got endpoints: latency-svc-2z6gm [731.148754ms] -Jul 29 16:10:44.440: INFO: Created: latency-svc-86brk -Jul 29 16:10:44.466: INFO: Got endpoints: latency-svc-vglcd [738.655909ms] -Jul 29 16:10:44.484: INFO: Created: latency-svc-9txnw -Jul 29 16:10:44.517: INFO: Got endpoints: latency-svc-c8bqk [742.741007ms] -Jul 29 16:10:44.535: INFO: Created: latency-svc-2xkxs -Jul 29 16:10:44.564: INFO: Got endpoints: latency-svc-w2m2z [740.781256ms] -Jul 29 16:10:44.583: INFO: Created: latency-svc-bg5zc -Jul 29 16:10:44.617: INFO: Got endpoints: latency-svc-ss6fl [752.1435ms] -Jul 29 16:10:44.641: INFO: Created: latency-svc-qkpr5 -Jul 29 16:10:44.666: INFO: Got endpoints: latency-svc-r8xpg [745.068402ms] -Jul 29 16:10:44.683: INFO: Created: latency-svc-qjjrl -Jul 29 16:10:44.716: INFO: Got endpoints: latency-svc-7lbt9 [752.518793ms] -Jul 29 16:10:44.734: INFO: Created: latency-svc-bbkql -Jul 29 16:10:44.766: INFO: Got endpoints: latency-svc-c66dv [743.334805ms] -Jul 29 16:10:44.783: INFO: Created: latency-svc-vbmwh -Jul 29 16:10:44.819: INFO: Got endpoints: latency-svc-bc6sq [753.585293ms] -Jul 29 16:10:44.839: INFO: Created: latency-svc-pt277 -Jul 29 16:10:44.866: INFO: Got endpoints: latency-svc-blqhz [750.664964ms] -Jul 29 16:10:44.884: INFO: Created: latency-svc-r5zxd -Jul 29 16:10:44.916: INFO: Got endpoints: latency-svc-fkxxt [749.74106ms] -Jul 29 16:10:44.936: INFO: Created: latency-svc-cgghn -Jul 29 16:10:44.973: INFO: Got endpoints: latency-svc-hdn26 [757.064552ms] -Jul 29 16:10:44.994: INFO: Created: latency-svc-t4mwr -Jul 29 16:10:45.041: INFO: Got endpoints: latency-svc-j2bp5 [774.972227ms] -Jul 29 16:10:45.060: INFO: Created: latency-svc-pqrfl -Jul 29 16:10:45.074: INFO: Got endpoints: latency-svc-tbpxg [752.923154ms] -Jul 29 16:10:45.099: INFO: Created: latency-svc-4d5hg -Jul 29 16:10:45.112: INFO: Got endpoints: latency-svc-jtp8k [740.501062ms] -Jul 29 16:10:45.130: INFO: Created: latency-svc-266xk -Jul 29 16:10:45.167: INFO: Got endpoints: latency-svc-86brk [752.250376ms] -Jul 29 16:10:45.191: INFO: Created: latency-svc-xnw2z -Jul 29 16:10:45.219: INFO: Got endpoints: latency-svc-9txnw [753.056859ms] -Jul 29 16:10:45.237: INFO: Created: latency-svc-n62ph -Jul 29 16:10:45.266: INFO: Got endpoints: latency-svc-2xkxs [748.454077ms] -Jul 29 16:10:45.286: INFO: Created: latency-svc-mdbcn -Jul 29 16:10:45.318: INFO: Got endpoints: latency-svc-bg5zc [753.634317ms] -Jul 29 16:10:45.342: INFO: Created: latency-svc-kghsd -Jul 29 16:10:45.361: INFO: Got endpoints: latency-svc-qkpr5 [743.686156ms] -Jul 29 16:10:45.382: INFO: Created: latency-svc-8mxtb -Jul 29 16:10:45.416: INFO: Got endpoints: latency-svc-qjjrl [749.628306ms] -Jul 29 16:10:45.436: INFO: Created: latency-svc-t88c6 -Jul 29 16:10:45.465: INFO: 
Got endpoints: latency-svc-bbkql [748.918457ms] -Jul 29 16:10:45.486: INFO: Created: latency-svc-gwzbp -Jul 29 16:10:45.516: INFO: Got endpoints: latency-svc-vbmwh [749.220419ms] -Jul 29 16:10:45.537: INFO: Created: latency-svc-mwmtn -Jul 29 16:10:45.566: INFO: Got endpoints: latency-svc-pt277 [745.355846ms] -Jul 29 16:10:45.587: INFO: Created: latency-svc-clqnr -Jul 29 16:10:45.618: INFO: Got endpoints: latency-svc-r5zxd [751.797494ms] -Jul 29 16:10:45.639: INFO: Created: latency-svc-7nk5w -Jul 29 16:10:45.666: INFO: Got endpoints: latency-svc-cgghn [750.193481ms] -Jul 29 16:10:45.684: INFO: Created: latency-svc-prjw7 -Jul 29 16:10:45.713: INFO: Got endpoints: latency-svc-t4mwr [738.716265ms] -Jul 29 16:10:45.732: INFO: Created: latency-svc-m572g -Jul 29 16:10:45.767: INFO: Got endpoints: latency-svc-pqrfl [724.719266ms] -Jul 29 16:10:45.790: INFO: Created: latency-svc-2ql4v -Jul 29 16:10:45.818: INFO: Got endpoints: latency-svc-4d5hg [740.395873ms] -Jul 29 16:10:45.837: INFO: Created: latency-svc-k4ksv -Jul 29 16:10:45.868: INFO: Got endpoints: latency-svc-266xk [756.011493ms] -Jul 29 16:10:45.885: INFO: Created: latency-svc-ltfnq -Jul 29 16:10:45.926: INFO: Got endpoints: latency-svc-xnw2z [757.923098ms] -Jul 29 16:10:45.946: INFO: Created: latency-svc-9ljsx -Jul 29 16:10:45.970: INFO: Got endpoints: latency-svc-n62ph [751.043164ms] -Jul 29 16:10:45.988: INFO: Created: latency-svc-mqn27 -Jul 29 16:10:46.016: INFO: Got endpoints: latency-svc-mdbcn [750.200784ms] -Jul 29 16:10:46.034: INFO: Created: latency-svc-nclxn -Jul 29 16:10:46.068: INFO: Got endpoints: latency-svc-kghsd [749.507975ms] -Jul 29 16:10:46.094: INFO: Created: latency-svc-vfzvr -Jul 29 16:10:46.117: INFO: Got endpoints: latency-svc-8mxtb [755.481854ms] -Jul 29 16:10:46.151: INFO: Created: latency-svc-8tddw -Jul 29 16:10:46.165: INFO: Got endpoints: latency-svc-t88c6 [748.457143ms] -Jul 29 16:10:46.185: INFO: Created: latency-svc-v5gjx -Jul 29 16:10:46.218: INFO: Got endpoints: latency-svc-gwzbp [752.538727ms] -Jul 29 16:10:46.237: INFO: Created: latency-svc-48zgg -Jul 29 16:10:46.265: INFO: Got endpoints: latency-svc-mwmtn [749.486816ms] -Jul 29 16:10:46.287: INFO: Created: latency-svc-rqspz -Jul 29 16:10:46.314: INFO: Got endpoints: latency-svc-clqnr [748.182573ms] -Jul 29 16:10:46.338: INFO: Created: latency-svc-55sl6 -Jul 29 16:10:46.366: INFO: Got endpoints: latency-svc-7nk5w [747.555815ms] -Jul 29 16:10:46.385: INFO: Created: latency-svc-gw9ds -Jul 29 16:10:46.418: INFO: Got endpoints: latency-svc-prjw7 [751.447302ms] -Jul 29 16:10:46.436: INFO: Created: latency-svc-gkmrw -Jul 29 16:10:46.464: INFO: Got endpoints: latency-svc-m572g [751.15606ms] -Jul 29 16:10:46.514: INFO: Got endpoints: latency-svc-2ql4v [744.202726ms] -Jul 29 16:10:46.578: INFO: Got endpoints: latency-svc-k4ksv [759.01028ms] -Jul 29 16:10:46.617: INFO: Got endpoints: latency-svc-ltfnq [748.211835ms] -Jul 29 16:10:46.664: INFO: Got endpoints: latency-svc-9ljsx [738.727944ms] -Jul 29 16:10:46.718: INFO: Got endpoints: latency-svc-mqn27 [746.701523ms] -Jul 29 16:10:46.770: INFO: Got endpoints: latency-svc-nclxn [753.131953ms] -Jul 29 16:10:46.817: INFO: Got endpoints: latency-svc-vfzvr [748.648272ms] -Jul 29 16:10:46.874: INFO: Got endpoints: latency-svc-8tddw [756.674084ms] -Jul 29 16:10:46.916: INFO: Got endpoints: latency-svc-v5gjx [750.798794ms] -Jul 29 16:10:46.975: INFO: Got endpoints: latency-svc-48zgg [757.099886ms] -Jul 29 16:10:47.014: INFO: Got endpoints: latency-svc-rqspz [748.92676ms] -Jul 29 16:10:47.070: INFO: Got endpoints: 
latency-svc-55sl6 [755.922277ms] -Jul 29 16:10:47.113: INFO: Got endpoints: latency-svc-gw9ds [746.161845ms] -Jul 29 16:10:47.165: INFO: Got endpoints: latency-svc-gkmrw [746.749929ms] -Jul 29 16:10:47.165: INFO: Latencies: [45.293187ms 63.71407ms 95.000356ms 154.147783ms 162.173887ms 176.404163ms 181.154753ms 182.288048ms 190.26221ms 198.100372ms 203.087744ms 207.450406ms 207.50357ms 218.089412ms 218.539002ms 218.923806ms 223.759221ms 231.834458ms 236.682819ms 241.426418ms 244.954464ms 266.180365ms 274.950142ms 280.026035ms 281.276257ms 281.794835ms 283.811802ms 286.289579ms 286.7499ms 287.2344ms 290.060239ms 291.303258ms 294.996983ms 300.636657ms 301.122606ms 301.803559ms 301.920184ms 302.040457ms 304.841131ms 306.296716ms 306.641761ms 307.138114ms 311.716002ms 315.175295ms 318.786604ms 320.657876ms 326.761641ms 331.635634ms 337.015961ms 381.295511ms 427.375016ms 430.046281ms 475.564726ms 493.506093ms 540.582421ms 563.263459ms 563.629522ms 599.898692ms 658.279346ms 678.001074ms 684.784551ms 692.195817ms 695.318471ms 698.874506ms 710.63572ms 721.578222ms 723.773406ms 724.581012ms 724.719266ms 727.488471ms 727.509847ms 731.148754ms 731.694053ms 731.818111ms 738.518021ms 738.655909ms 738.716265ms 738.727944ms 739.369469ms 739.856394ms 740.395873ms 740.501062ms 740.781256ms 741.11959ms 741.215149ms 741.875604ms 742.46766ms 742.590705ms 742.741007ms 743.334805ms 743.686156ms 743.973448ms 744.202726ms 744.409124ms 744.819271ms 745.068402ms 745.315042ms 745.355846ms 745.730729ms 745.736755ms 745.860649ms 746.161845ms 746.279828ms 746.701523ms 746.738115ms 746.749929ms 746.809035ms 747.122685ms 747.150777ms 747.16723ms 747.343217ms 747.555815ms 747.591797ms 747.683294ms 747.747872ms 747.752197ms 747.772804ms 747.801709ms 747.891813ms 747.899981ms 748.182573ms 748.211835ms 748.262276ms 748.454077ms 748.457143ms 748.648272ms 748.690434ms 748.703636ms 748.853441ms 748.918457ms 748.926669ms 748.92676ms 749.220419ms 749.486816ms 749.507975ms 749.597859ms 749.606516ms 749.628306ms 749.74106ms 749.78388ms 749.993868ms 750.175416ms 750.193481ms 750.200784ms 750.274169ms 750.303709ms 750.399179ms 750.627728ms 750.664964ms 750.798794ms 750.936543ms 751.043164ms 751.15606ms 751.186251ms 751.447302ms 751.523012ms 751.553233ms 751.596498ms 751.797494ms 751.821217ms 752.1435ms 752.250376ms 752.518793ms 752.538727ms 752.801909ms 752.86481ms 752.923154ms 753.056859ms 753.131953ms 753.585293ms 753.634317ms 753.639619ms 754.05164ms 754.101072ms 754.305473ms 754.429243ms 754.607951ms 755.481854ms 755.706697ms 755.922277ms 756.011493ms 756.022737ms 756.349375ms 756.674084ms 757.064552ms 757.099886ms 757.923098ms 758.074435ms 759.01028ms 759.24957ms 759.778009ms 760.187244ms 760.307784ms 762.393572ms 766.044481ms 768.866375ms 770.559758ms 774.972227ms 784.732369ms 927.355533ms] -Jul 29 16:10:47.166: INFO: 50 %ile: 745.860649ms -Jul 29 16:10:47.166: INFO: 90 %ile: 756.011493ms -Jul 29 16:10:47.166: INFO: 99 %ile: 784.732369ms -Jul 29 16:10:47.166: INFO: Total sample count: 200 -[AfterEach] [sig-network] Service endpoints latency +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:47 +STEP: Creating configMap with name projected-configmap-test-volume-875393fe-8068-4b0c-a6d0-a52afdfaacc7 08/24/23 12:10:26.793 +STEP: Creating a pod to test consume configMaps 08/24/23 12:10:26.803 +Aug 24 12:10:26.818: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305" in namespace "projected-3007" to be "Succeeded or 
Failed" +Aug 24 12:10:26.825: INFO: Pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305": Phase="Pending", Reason="", readiness=false. Elapsed: 6.574206ms +Aug 24 12:10:28.833: INFO: Pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015224556s +Aug 24 12:10:30.832: INFO: Pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013571196s +STEP: Saw pod success 08/24/23 12:10:30.832 +Aug 24 12:10:30.832: INFO: Pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305" satisfied condition "Succeeded or Failed" +Aug 24 12:10:30.837: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305 container agnhost-container: +STEP: delete the pod 08/24/23 12:10:30.849 +Aug 24 12:10:30.876: INFO: Waiting for pod pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305 to disappear +Aug 24 12:10:30.881: INFO: Pod pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305 no longer exists +[AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:47.167: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Service endpoints latency +Aug 24 12:10:30.882: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Service endpoints latency +[DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Service endpoints latency +[DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 -STEP: Destroying namespace "svc-latency-3279" for this suite. 07/29/23 16:10:47.179 +STEP: Destroying namespace "projected-3007" for this suite. 
08/24/23 12:10:30.891 ------------------------------ -• [SLOW TEST] [10.814 seconds] -[sig-network] Service endpoints latency -test/e2e/network/common/framework.go:23 - should not be very high [Conformance] - test/e2e/network/service_latency.go:59 +• [4.166 seconds] +[sig-storage] Projected configMap +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:47 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Service endpoints latency + [BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:10:36.377 - Jul 29 16:10:36.377: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename svc-latency 07/29/23 16:10:36.38 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:36.41 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:36.415 - [BeforeEach] [sig-network] Service endpoints latency + STEP: Creating a kubernetes client 08/24/23 12:10:26.737 + Aug 24 12:10:26.737: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:10:26.738 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:26.78 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:26.785 + [BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 - [It] should not be very high [Conformance] - test/e2e/network/service_latency.go:59 - Jul 29 16:10:36.419: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: creating replication controller svc-latency-rc in namespace svc-latency-3279 07/29/23 16:10:36.421 - I0729 16:10:36.430415 13 runners.go:193] Created replication controller with name: svc-latency-rc, namespace: svc-latency-3279, replica count: 1 - I0729 16:10:37.482463 13 runners.go:193] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady - I0729 16:10:38.483428 13 runners.go:193] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady - Jul 29 16:10:38.606: INFO: Created: latency-svc-rt8wc - Jul 29 16:10:38.618: INFO: Got endpoints: latency-svc-rt8wc [34.088869ms] - Jul 29 16:10:38.644: INFO: Created: latency-svc-dnkrh - Jul 29 16:10:38.658: INFO: Created: latency-svc-gw4bw - Jul 29 16:10:38.668: INFO: Created: latency-svc-ghlcb - Jul 29 16:10:38.666: INFO: Got endpoints: latency-svc-dnkrh [45.293187ms] - Jul 29 16:10:38.685: INFO: Got endpoints: latency-svc-gw4bw [63.71407ms] - Jul 29 16:10:38.686: INFO: Created: latency-svc-fw2xb - Jul 29 16:10:38.708: INFO: Created: latency-svc-tx6f2 - Jul 29 16:10:38.715: INFO: Created: latency-svc-bqmnn - Jul 29 16:10:38.716: INFO: Got endpoints: latency-svc-ghlcb [95.000356ms] - Jul 29 16:10:38.725: INFO: Created: latency-svc-zfdrb - Jul 29 16:10:38.758: INFO: Created: latency-svc-mbnq7 - Jul 29 16:10:38.760: INFO: Created: latency-svc-dn48k - Jul 29 16:10:38.770: INFO: Created: latency-svc-vwvwg - Jul 29 16:10:38.784: INFO: Created: latency-svc-jhkxs - Jul 29 16:10:38.790: INFO: Created: latency-svc-kjm46 - Jul 29 16:10:38.806: INFO: Created: latency-svc-6b28c - Jul 29 16:10:38.814: INFO: Created: latency-svc-5z9wz - Jul 29 16:10:38.820: INFO: Got endpoints: 
latency-svc-fw2xb [198.100372ms] - Jul 29 16:10:38.830: INFO: Got endpoints: latency-svc-bqmnn [207.450406ms] - Jul 29 16:10:38.840: INFO: Created: latency-svc-t8mlx - Jul 29 16:10:38.841: INFO: Got endpoints: latency-svc-mbnq7 [218.539002ms] - Jul 29 16:10:38.841: INFO: Got endpoints: latency-svc-tx6f2 [218.089412ms] - Jul 29 16:10:38.842: INFO: Got endpoints: latency-svc-zfdrb [218.923806ms] - Jul 29 16:10:38.865: INFO: Created: latency-svc-9sjwc - Jul 29 16:10:38.884: INFO: Created: latency-svc-jp2j9 - Jul 29 16:10:38.924: INFO: Created: latency-svc-h6vcw - Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-vwvwg [300.636657ms] - Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-dn48k [301.122606ms] - Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-jhkxs [301.803559ms] - Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-kjm46 [302.040457ms] - Jul 29 16:10:38.924: INFO: Got endpoints: latency-svc-6b28c [301.920184ms] - Jul 29 16:10:38.939: INFO: Got endpoints: latency-svc-t8mlx [315.175295ms] - Jul 29 16:10:38.949: INFO: Got endpoints: latency-svc-9sjwc [281.276257ms] - Jul 29 16:10:38.951: INFO: Got endpoints: latency-svc-5z9wz [326.761641ms] - Jul 29 16:10:38.955: INFO: Created: latency-svc-k8zp7 - Jul 29 16:10:38.961: INFO: Got endpoints: latency-svc-jp2j9 [274.950142ms] - Jul 29 16:10:38.961: INFO: Got endpoints: latency-svc-h6vcw [244.954464ms] - Jul 29 16:10:38.974: INFO: Got endpoints: latency-svc-k8zp7 [154.147783ms] - Jul 29 16:10:38.975: INFO: Created: latency-svc-8fd2b - Jul 29 16:10:39.002: INFO: Created: latency-svc-t6nbb - Jul 29 16:10:39.013: INFO: Got endpoints: latency-svc-8fd2b [182.288048ms] - Jul 29 16:10:39.022: INFO: Got endpoints: latency-svc-t6nbb [181.154753ms] - Jul 29 16:10:39.024: INFO: Created: latency-svc-zvw47 - Jul 29 16:10:39.045: INFO: Got endpoints: latency-svc-zvw47 [203.087744ms] - Jul 29 16:10:39.049: INFO: Created: latency-svc-k8glb - Jul 29 16:10:39.066: INFO: Got endpoints: latency-svc-k8glb [223.759221ms] - Jul 29 16:10:39.070: INFO: Created: latency-svc-4k6rj - Jul 29 16:10:39.087: INFO: Got endpoints: latency-svc-4k6rj [162.173887ms] - Jul 29 16:10:39.094: INFO: Created: latency-svc-kgzfj - Jul 29 16:10:39.101: INFO: Got endpoints: latency-svc-kgzfj [176.404163ms] - Jul 29 16:10:39.109: INFO: Created: latency-svc-v4pxv - Jul 29 16:10:39.118: INFO: Got endpoints: latency-svc-v4pxv [190.26221ms] - Jul 29 16:10:39.120: INFO: Created: latency-svc-fr6px - Jul 29 16:10:39.136: INFO: Got endpoints: latency-svc-fr6px [207.50357ms] - Jul 29 16:10:39.147: INFO: Created: latency-svc-9sfvq - Jul 29 16:10:39.161: INFO: Got endpoints: latency-svc-9sfvq [231.834458ms] - Jul 29 16:10:39.164: INFO: Created: latency-svc-mgcht - Jul 29 16:10:39.176: INFO: Got endpoints: latency-svc-mgcht [236.682819ms] - Jul 29 16:10:39.177: INFO: Created: latency-svc-m9c6t - Jul 29 16:10:39.191: INFO: Got endpoints: latency-svc-m9c6t [241.426418ms] - Jul 29 16:10:39.204: INFO: Created: latency-svc-kkn2x - Jul 29 16:10:39.217: INFO: Got endpoints: latency-svc-kkn2x [266.180365ms] - Jul 29 16:10:39.226: INFO: Created: latency-svc-hlxnm - Jul 29 16:10:39.238: INFO: Created: latency-svc-xqf6x - Jul 29 16:10:39.243: INFO: Got endpoints: latency-svc-hlxnm [281.794835ms] - Jul 29 16:10:39.258: INFO: Created: latency-svc-xbvtm - Jul 29 16:10:39.266: INFO: Got endpoints: latency-svc-xqf6x [304.841131ms] - Jul 29 16:10:39.281: INFO: Got endpoints: latency-svc-xbvtm [306.641761ms] - Jul 29 16:10:39.281: INFO: Created: latency-svc-4jdj2 - Jul 29 16:10:39.293: INFO: Created: 
latency-svc-trz45 - Jul 29 16:10:39.297: INFO: Got endpoints: latency-svc-4jdj2 [283.811802ms] - Jul 29 16:10:39.308: INFO: Got endpoints: latency-svc-trz45 [286.289579ms] - Jul 29 16:10:39.317: INFO: Created: latency-svc-7h424 - Jul 29 16:10:39.337: INFO: Created: latency-svc-kqvg7 - Jul 29 16:10:39.340: INFO: Got endpoints: latency-svc-7h424 [294.996983ms] - Jul 29 16:10:39.353: INFO: Got endpoints: latency-svc-kqvg7 [286.7499ms] - Jul 29 16:10:39.370: INFO: Created: latency-svc-dlbtc - Jul 29 16:10:39.374: INFO: Got endpoints: latency-svc-dlbtc [287.2344ms] - Jul 29 16:10:39.384: INFO: Created: latency-svc-hpsst - Jul 29 16:10:39.393: INFO: Created: latency-svc-2st8c - Jul 29 16:10:39.402: INFO: Created: latency-svc-7z68h - Jul 29 16:10:39.413: INFO: Got endpoints: latency-svc-hpsst [311.716002ms] - Jul 29 16:10:39.413: INFO: Created: latency-svc-6gzft - Jul 29 16:10:39.422: INFO: Created: latency-svc-42sql - Jul 29 16:10:39.434: INFO: Created: latency-svc-n7db2 - Jul 29 16:10:39.450: INFO: Got endpoints: latency-svc-2st8c [331.635634ms] - Jul 29 16:10:39.455: INFO: Created: latency-svc-s47cm - Jul 29 16:10:39.457: INFO: Got endpoints: latency-svc-7z68h [320.657876ms] - Jul 29 16:10:39.468: INFO: Got endpoints: latency-svc-6gzft [307.138114ms] - Jul 29 16:10:39.481: INFO: Got endpoints: latency-svc-n7db2 [290.060239ms] - Jul 29 16:10:39.482: INFO: Got endpoints: latency-svc-42sql [306.296716ms] - Jul 29 16:10:39.497: INFO: Got endpoints: latency-svc-s47cm [280.026035ms] - Jul 29 16:10:39.513: INFO: Created: latency-svc-rp9mm - Jul 29 16:10:39.518: INFO: Created: latency-svc-l987h - Jul 29 16:10:39.534: INFO: Got endpoints: latency-svc-rp9mm [291.303258ms] - Jul 29 16:10:39.540: INFO: Created: latency-svc-sv4mv - Jul 29 16:10:39.559: INFO: Created: latency-svc-7kgqx - Jul 29 16:10:39.569: INFO: Created: latency-svc-vqj99 - Jul 29 16:10:39.585: INFO: Got endpoints: latency-svc-l987h [318.786604ms] - Jul 29 16:10:39.587: INFO: Created: latency-svc-grxkm - Jul 29 16:10:39.598: INFO: Created: latency-svc-fmgw8 - Jul 29 16:10:39.614: INFO: Created: latency-svc-wsr8n - Jul 29 16:10:39.619: INFO: Got endpoints: latency-svc-sv4mv [337.015961ms] - Jul 29 16:10:39.647: INFO: Created: latency-svc-d57rn - Jul 29 16:10:39.672: INFO: Created: latency-svc-xw6b5 - Jul 29 16:10:39.678: INFO: Got endpoints: latency-svc-7kgqx [381.295511ms] - Jul 29 16:10:39.689: INFO: Created: latency-svc-k6548 - Jul 29 16:10:39.703: INFO: Created: latency-svc-nn4xm - Jul 29 16:10:39.721: INFO: Created: latency-svc-g2lkz - Jul 29 16:10:39.736: INFO: Got endpoints: latency-svc-vqj99 [427.375016ms] - Jul 29 16:10:39.770: INFO: Got endpoints: latency-svc-grxkm [430.046281ms] - Jul 29 16:10:39.783: INFO: Created: latency-svc-bjr69 - Jul 29 16:10:39.787: INFO: Created: latency-svc-6klxg - Jul 29 16:10:39.814: INFO: Created: latency-svc-f4lts - Jul 29 16:10:39.828: INFO: Got endpoints: latency-svc-fmgw8 [475.564726ms] - Jul 29 16:10:39.837: INFO: Created: latency-svc-f896d - Jul 29 16:10:39.847: INFO: Created: latency-svc-d4wp2 - Jul 29 16:10:39.868: INFO: Created: latency-svc-npm2c - Jul 29 16:10:39.868: INFO: Got endpoints: latency-svc-wsr8n [493.506093ms] - Jul 29 16:10:39.879: INFO: Created: latency-svc-w9xmx - Jul 29 16:10:39.916: INFO: Created: latency-svc-9xwx7 - Jul 29 16:10:39.930: INFO: Created: latency-svc-4bhhp - Jul 29 16:10:39.930: INFO: Created: latency-svc-nw4bz - Jul 29 16:10:39.976: INFO: Got endpoints: latency-svc-d57rn [563.629522ms] - Jul 29 16:10:39.991: INFO: Got endpoints: latency-svc-xw6b5 
[540.582421ms] - Jul 29 16:10:40.012: INFO: Created: latency-svc-fks87 - Jul 29 16:10:40.021: INFO: Got endpoints: latency-svc-k6548 [563.263459ms] - Jul 29 16:10:40.036: INFO: Created: latency-svc-4tfjf - Jul 29 16:10:40.045: INFO: Created: latency-svc-tkcfq - Jul 29 16:10:40.068: INFO: Got endpoints: latency-svc-nn4xm [599.898692ms] - Jul 29 16:10:40.091: INFO: Created: latency-svc-dlljb - Jul 29 16:10:40.140: INFO: Got endpoints: latency-svc-g2lkz [658.279346ms] - Jul 29 16:10:40.162: INFO: Created: latency-svc-mpnzg - Jul 29 16:10:40.203: INFO: Got endpoints: latency-svc-bjr69 [721.578222ms] - Jul 29 16:10:40.219: INFO: Created: latency-svc-hv6mt - Jul 29 16:10:40.226: INFO: Got endpoints: latency-svc-6klxg [727.509847ms] - Jul 29 16:10:40.246: INFO: Created: latency-svc-nc5zz - Jul 29 16:10:40.266: INFO: Got endpoints: latency-svc-f4lts [731.694053ms] - Jul 29 16:10:40.287: INFO: Created: latency-svc-kxx5j - Jul 29 16:10:40.318: INFO: Got endpoints: latency-svc-f896d [698.874506ms] - Jul 29 16:10:40.336: INFO: Created: latency-svc-c8hlc - Jul 29 16:10:40.363: INFO: Got endpoints: latency-svc-d4wp2 [684.784551ms] - Jul 29 16:10:40.381: INFO: Created: latency-svc-fq89x - Jul 29 16:10:40.414: INFO: Got endpoints: latency-svc-npm2c [678.001074ms] - Jul 29 16:10:40.432: INFO: Created: latency-svc-kkjwl - Jul 29 16:10:40.466: INFO: Got endpoints: latency-svc-w9xmx [695.318471ms] - Jul 29 16:10:40.481: INFO: Created: latency-svc-m98pz - Jul 29 16:10:40.515: INFO: Got endpoints: latency-svc-4bhhp [927.355533ms] - Jul 29 16:10:40.534: INFO: Created: latency-svc-q89c4 - Jul 29 16:10:40.570: INFO: Got endpoints: latency-svc-9xwx7 [741.11959ms] - Jul 29 16:10:40.589: INFO: Created: latency-svc-nqvp8 - Jul 29 16:10:40.615: INFO: Got endpoints: latency-svc-nw4bz [746.809035ms] - Jul 29 16:10:40.646: INFO: Created: latency-svc-npl58 - Jul 29 16:10:40.669: INFO: Got endpoints: latency-svc-fks87 [692.195817ms] - Jul 29 16:10:40.687: INFO: Created: latency-svc-nb4c8 - Jul 29 16:10:40.715: INFO: Got endpoints: latency-svc-4tfjf [723.773406ms] - Jul 29 16:10:40.746: INFO: Created: latency-svc-pcfhv - Jul 29 16:10:40.767: INFO: Got endpoints: latency-svc-tkcfq [745.730729ms] - Jul 29 16:10:40.792: INFO: Created: latency-svc-g8mxm - Jul 29 16:10:40.815: INFO: Got endpoints: latency-svc-dlljb [747.122685ms] - Jul 29 16:10:40.838: INFO: Created: latency-svc-srjvk - Jul 29 16:10:40.868: INFO: Got endpoints: latency-svc-mpnzg [727.488471ms] - Jul 29 16:10:40.890: INFO: Created: latency-svc-crs9v - Jul 29 16:10:40.913: INFO: Got endpoints: latency-svc-hv6mt [710.63572ms] - Jul 29 16:10:40.929: INFO: Created: latency-svc-26pgg - Jul 29 16:10:40.970: INFO: Got endpoints: latency-svc-nc5zz [743.973448ms] - Jul 29 16:10:40.992: INFO: Created: latency-svc-4dzhg - Jul 29 16:10:41.014: INFO: Got endpoints: latency-svc-kxx5j [747.683294ms] - Jul 29 16:10:41.037: INFO: Created: latency-svc-m5kdh - Jul 29 16:10:41.066: INFO: Got endpoints: latency-svc-c8hlc [747.591797ms] - Jul 29 16:10:41.088: INFO: Created: latency-svc-7rp7p - Jul 29 16:10:41.120: INFO: Got endpoints: latency-svc-fq89x [756.022737ms] - Jul 29 16:10:41.141: INFO: Created: latency-svc-whp27 - Jul 29 16:10:41.165: INFO: Got endpoints: latency-svc-kkjwl [750.399179ms] - Jul 29 16:10:41.192: INFO: Created: latency-svc-8cwkh - Jul 29 16:10:41.216: INFO: Got endpoints: latency-svc-m98pz [750.303709ms] - Jul 29 16:10:41.233: INFO: Created: latency-svc-p2dht - Jul 29 16:10:41.266: INFO: Got endpoints: latency-svc-q89c4 [751.523012ms] - Jul 29 16:10:41.285: 
INFO: Created: latency-svc-9hvpd - Jul 29 16:10:41.318: INFO: Got endpoints: latency-svc-nqvp8 [748.690434ms] - Jul 29 16:10:41.348: INFO: Created: latency-svc-dgzfx - Jul 29 16:10:41.384: INFO: Got endpoints: latency-svc-npl58 [768.866375ms] - Jul 29 16:10:41.406: INFO: Created: latency-svc-64qh5 - Jul 29 16:10:41.414: INFO: Got endpoints: latency-svc-nb4c8 [744.819271ms] - Jul 29 16:10:41.437: INFO: Created: latency-svc-89wdv - Jul 29 16:10:41.463: INFO: Got endpoints: latency-svc-pcfhv [747.772804ms] - Jul 29 16:10:41.481: INFO: Created: latency-svc-77pwh - Jul 29 16:10:41.513: INFO: Got endpoints: latency-svc-g8mxm [746.738115ms] - Jul 29 16:10:41.534: INFO: Created: latency-svc-5jkcn - Jul 29 16:10:41.564: INFO: Got endpoints: latency-svc-srjvk [748.853441ms] - Jul 29 16:10:41.583: INFO: Created: latency-svc-5cwvn - Jul 29 16:10:41.614: INFO: Got endpoints: latency-svc-crs9v [745.860649ms] - Jul 29 16:10:41.644: INFO: Created: latency-svc-pkgn2 - Jul 29 16:10:41.665: INFO: Got endpoints: latency-svc-26pgg [751.553233ms] - Jul 29 16:10:41.690: INFO: Created: latency-svc-c72wz - Jul 29 16:10:41.718: INFO: Got endpoints: latency-svc-4dzhg [747.752197ms] - Jul 29 16:10:41.753: INFO: Created: latency-svc-wgvrb - Jul 29 16:10:41.781: INFO: Got endpoints: latency-svc-m5kdh [766.044481ms] - Jul 29 16:10:41.810: INFO: Created: latency-svc-zwq56 - Jul 29 16:10:41.813: INFO: Got endpoints: latency-svc-7rp7p [747.150777ms] - Jul 29 16:10:41.846: INFO: Created: latency-svc-czbsb - Jul 29 16:10:41.876: INFO: Got endpoints: latency-svc-whp27 [755.706697ms] - Jul 29 16:10:41.898: INFO: Created: latency-svc-gcdtj - Jul 29 16:10:41.916: INFO: Got endpoints: latency-svc-8cwkh [750.175416ms] - Jul 29 16:10:41.935: INFO: Created: latency-svc-crxbl - Jul 29 16:10:41.971: INFO: Got endpoints: latency-svc-p2dht [754.429243ms] - Jul 29 16:10:41.990: INFO: Created: latency-svc-9lw9q - Jul 29 16:10:42.017: INFO: Got endpoints: latency-svc-9hvpd [749.993868ms] - Jul 29 16:10:42.033: INFO: Created: latency-svc-vprtm - Jul 29 16:10:42.075: INFO: Got endpoints: latency-svc-dgzfx [756.349375ms] - Jul 29 16:10:42.094: INFO: Created: latency-svc-zzllh - Jul 29 16:10:42.123: INFO: Got endpoints: latency-svc-64qh5 [738.518021ms] - Jul 29 16:10:42.145: INFO: Created: latency-svc-qbk82 - Jul 29 16:10:42.167: INFO: Got endpoints: latency-svc-89wdv [752.86481ms] - Jul 29 16:10:42.198: INFO: Created: latency-svc-vznr5 - Jul 29 16:10:42.218: INFO: Got endpoints: latency-svc-77pwh [754.101072ms] - Jul 29 16:10:42.244: INFO: Created: latency-svc-h2nfs - Jul 29 16:10:42.274: INFO: Got endpoints: latency-svc-5jkcn [760.187244ms] - Jul 29 16:10:42.293: INFO: Created: latency-svc-q4chv - Jul 29 16:10:42.323: INFO: Got endpoints: latency-svc-5cwvn [759.24957ms] - Jul 29 16:10:42.354: INFO: Created: latency-svc-svqlj - Jul 29 16:10:42.366: INFO: Got endpoints: latency-svc-pkgn2 [751.821217ms] - Jul 29 16:10:42.385: INFO: Created: latency-svc-gwfj8 - Jul 29 16:10:42.415: INFO: Got endpoints: latency-svc-c72wz [748.926669ms] - Jul 29 16:10:42.432: INFO: Created: latency-svc-ww5dd - Jul 29 16:10:42.469: INFO: Got endpoints: latency-svc-wgvrb [749.597859ms] - Jul 29 16:10:42.485: INFO: Created: latency-svc-zz4hw - Jul 29 16:10:42.521: INFO: Got endpoints: latency-svc-zwq56 [739.856394ms] - Jul 29 16:10:42.539: INFO: Created: latency-svc-gc856 - Jul 29 16:10:42.564: INFO: Got endpoints: latency-svc-czbsb [750.936543ms] - Jul 29 16:10:42.580: INFO: Created: latency-svc-vkhnm - Jul 29 16:10:42.620: INFO: Got endpoints: latency-svc-gcdtj 
[742.46766ms] - Jul 29 16:10:42.641: INFO: Created: latency-svc-xlj99 - Jul 29 16:10:42.666: INFO: Got endpoints: latency-svc-crxbl [749.78388ms] - Jul 29 16:10:42.685: INFO: Created: latency-svc-2x6bm - Jul 29 16:10:42.714: INFO: Got endpoints: latency-svc-9lw9q [742.590705ms] - Jul 29 16:10:42.733: INFO: Created: latency-svc-64rl5 - Jul 29 16:10:42.766: INFO: Got endpoints: latency-svc-vprtm [748.703636ms] - Jul 29 16:10:42.784: INFO: Created: latency-svc-5rgf8 - Jul 29 16:10:42.817: INFO: Got endpoints: latency-svc-zzllh [741.875604ms] - Jul 29 16:10:42.846: INFO: Created: latency-svc-74h9w - Jul 29 16:10:42.868: INFO: Got endpoints: latency-svc-qbk82 [745.315042ms] - Jul 29 16:10:42.892: INFO: Created: latency-svc-v2jwp - Jul 29 16:10:42.921: INFO: Got endpoints: latency-svc-vznr5 [754.05164ms] - Jul 29 16:10:43.003: INFO: Got endpoints: latency-svc-h2nfs [784.732369ms] - Jul 29 16:10:43.008: INFO: Created: latency-svc-xj6dt - Jul 29 16:10:43.022: INFO: Got endpoints: latency-svc-q4chv [747.891813ms] - Jul 29 16:10:43.041: INFO: Created: latency-svc-rxswh - Jul 29 16:10:43.059: INFO: Created: latency-svc-j6qlh - Jul 29 16:10:43.063: INFO: Got endpoints: latency-svc-svqlj [739.369469ms] - Jul 29 16:10:43.077: INFO: Created: latency-svc-46hcv - Jul 29 16:10:43.113: INFO: Got endpoints: latency-svc-gwfj8 [747.16723ms] - Jul 29 16:10:43.135: INFO: Created: latency-svc-7bn94 - Jul 29 16:10:43.163: INFO: Got endpoints: latency-svc-ww5dd [747.899981ms] - Jul 29 16:10:43.178: INFO: Created: latency-svc-kpzkz - Jul 29 16:10:43.213: INFO: Got endpoints: latency-svc-zz4hw [744.409124ms] - Jul 29 16:10:43.239: INFO: Created: latency-svc-hrld7 - Jul 29 16:10:43.263: INFO: Got endpoints: latency-svc-gc856 [741.215149ms] - Jul 29 16:10:43.282: INFO: Created: latency-svc-bfv2n - Jul 29 16:10:43.319: INFO: Got endpoints: latency-svc-vkhnm [754.607951ms] - Jul 29 16:10:43.346: INFO: Created: latency-svc-rbbp5 - Jul 29 16:10:43.367: INFO: Got endpoints: latency-svc-xlj99 [747.747872ms] - Jul 29 16:10:43.388: INFO: Created: latency-svc-76p4n - Jul 29 16:10:43.420: INFO: Got endpoints: latency-svc-2x6bm [754.305473ms] - Jul 29 16:10:43.445: INFO: Created: latency-svc-lj4b2 - Jul 29 16:10:43.468: INFO: Got endpoints: latency-svc-64rl5 [753.639619ms] - Jul 29 16:10:43.486: INFO: Created: latency-svc-bdl2g - Jul 29 16:10:43.514: INFO: Got endpoints: latency-svc-5rgf8 [747.801709ms] - Jul 29 16:10:43.535: INFO: Created: latency-svc-nl8lc - Jul 29 16:10:43.571: INFO: Got endpoints: latency-svc-74h9w [752.801909ms] - Jul 29 16:10:43.595: INFO: Created: latency-svc-84426 - Jul 29 16:10:43.639: INFO: Got endpoints: latency-svc-v2jwp [770.559758ms] - Jul 29 16:10:43.680: INFO: Created: latency-svc-hbbh8 - Jul 29 16:10:43.684: INFO: Got endpoints: latency-svc-xj6dt [762.393572ms] - Jul 29 16:10:43.727: INFO: Got endpoints: latency-svc-rxswh [724.581012ms] - Jul 29 16:10:43.757: INFO: Created: latency-svc-2z6gm - Jul 29 16:10:43.773: INFO: Got endpoints: latency-svc-j6qlh [751.186251ms] - Jul 29 16:10:43.800: INFO: Created: latency-svc-vglcd - Jul 29 16:10:43.802: INFO: Created: latency-svc-c8bqk - Jul 29 16:10:43.823: INFO: Got endpoints: latency-svc-46hcv [759.778009ms] - Jul 29 16:10:43.843: INFO: Created: latency-svc-w2m2z - Jul 29 16:10:43.865: INFO: Got endpoints: latency-svc-7bn94 [750.627728ms] - Jul 29 16:10:43.921: INFO: Created: latency-svc-ss6fl - Jul 29 16:10:43.921: INFO: Got endpoints: latency-svc-kpzkz [758.074435ms] - Jul 29 16:10:43.953: INFO: Created: latency-svc-r8xpg - Jul 29 16:10:43.963: INFO: 
Got endpoints: latency-svc-hrld7 [750.274169ms] - Jul 29 16:10:43.982: INFO: Created: latency-svc-7lbt9 - Jul 29 16:10:44.023: INFO: Got endpoints: latency-svc-bfv2n [760.307784ms] - Jul 29 16:10:44.039: INFO: Created: latency-svc-c66dv - Jul 29 16:10:44.065: INFO: Got endpoints: latency-svc-rbbp5 [746.279828ms] - Jul 29 16:10:44.081: INFO: Created: latency-svc-bc6sq - Jul 29 16:10:44.115: INFO: Got endpoints: latency-svc-76p4n [747.343217ms] - Jul 29 16:10:44.133: INFO: Created: latency-svc-blqhz - Jul 29 16:10:44.166: INFO: Got endpoints: latency-svc-lj4b2 [745.736755ms] - Jul 29 16:10:44.185: INFO: Created: latency-svc-fkxxt - Jul 29 16:10:44.216: INFO: Got endpoints: latency-svc-bdl2g [748.262276ms] - Jul 29 16:10:44.237: INFO: Created: latency-svc-hdn26 - Jul 29 16:10:44.266: INFO: Got endpoints: latency-svc-nl8lc [751.596498ms] - Jul 29 16:10:44.302: INFO: Created: latency-svc-j2bp5 - Jul 29 16:10:44.321: INFO: Got endpoints: latency-svc-84426 [749.606516ms] - Jul 29 16:10:44.343: INFO: Created: latency-svc-tbpxg - Jul 29 16:10:44.371: INFO: Got endpoints: latency-svc-hbbh8 [731.818111ms] - Jul 29 16:10:44.390: INFO: Created: latency-svc-jtp8k - Jul 29 16:10:44.415: INFO: Got endpoints: latency-svc-2z6gm [731.148754ms] - Jul 29 16:10:44.440: INFO: Created: latency-svc-86brk - Jul 29 16:10:44.466: INFO: Got endpoints: latency-svc-vglcd [738.655909ms] - Jul 29 16:10:44.484: INFO: Created: latency-svc-9txnw - Jul 29 16:10:44.517: INFO: Got endpoints: latency-svc-c8bqk [742.741007ms] - Jul 29 16:10:44.535: INFO: Created: latency-svc-2xkxs - Jul 29 16:10:44.564: INFO: Got endpoints: latency-svc-w2m2z [740.781256ms] - Jul 29 16:10:44.583: INFO: Created: latency-svc-bg5zc - Jul 29 16:10:44.617: INFO: Got endpoints: latency-svc-ss6fl [752.1435ms] - Jul 29 16:10:44.641: INFO: Created: latency-svc-qkpr5 - Jul 29 16:10:44.666: INFO: Got endpoints: latency-svc-r8xpg [745.068402ms] - Jul 29 16:10:44.683: INFO: Created: latency-svc-qjjrl - Jul 29 16:10:44.716: INFO: Got endpoints: latency-svc-7lbt9 [752.518793ms] - Jul 29 16:10:44.734: INFO: Created: latency-svc-bbkql - Jul 29 16:10:44.766: INFO: Got endpoints: latency-svc-c66dv [743.334805ms] - Jul 29 16:10:44.783: INFO: Created: latency-svc-vbmwh - Jul 29 16:10:44.819: INFO: Got endpoints: latency-svc-bc6sq [753.585293ms] - Jul 29 16:10:44.839: INFO: Created: latency-svc-pt277 - Jul 29 16:10:44.866: INFO: Got endpoints: latency-svc-blqhz [750.664964ms] - Jul 29 16:10:44.884: INFO: Created: latency-svc-r5zxd - Jul 29 16:10:44.916: INFO: Got endpoints: latency-svc-fkxxt [749.74106ms] - Jul 29 16:10:44.936: INFO: Created: latency-svc-cgghn - Jul 29 16:10:44.973: INFO: Got endpoints: latency-svc-hdn26 [757.064552ms] - Jul 29 16:10:44.994: INFO: Created: latency-svc-t4mwr - Jul 29 16:10:45.041: INFO: Got endpoints: latency-svc-j2bp5 [774.972227ms] - Jul 29 16:10:45.060: INFO: Created: latency-svc-pqrfl - Jul 29 16:10:45.074: INFO: Got endpoints: latency-svc-tbpxg [752.923154ms] - Jul 29 16:10:45.099: INFO: Created: latency-svc-4d5hg - Jul 29 16:10:45.112: INFO: Got endpoints: latency-svc-jtp8k [740.501062ms] - Jul 29 16:10:45.130: INFO: Created: latency-svc-266xk - Jul 29 16:10:45.167: INFO: Got endpoints: latency-svc-86brk [752.250376ms] - Jul 29 16:10:45.191: INFO: Created: latency-svc-xnw2z - Jul 29 16:10:45.219: INFO: Got endpoints: latency-svc-9txnw [753.056859ms] - Jul 29 16:10:45.237: INFO: Created: latency-svc-n62ph - Jul 29 16:10:45.266: INFO: Got endpoints: latency-svc-2xkxs [748.454077ms] - Jul 29 16:10:45.286: INFO: Created: 
latency-svc-mdbcn - Jul 29 16:10:45.318: INFO: Got endpoints: latency-svc-bg5zc [753.634317ms] - Jul 29 16:10:45.342: INFO: Created: latency-svc-kghsd - Jul 29 16:10:45.361: INFO: Got endpoints: latency-svc-qkpr5 [743.686156ms] - Jul 29 16:10:45.382: INFO: Created: latency-svc-8mxtb - Jul 29 16:10:45.416: INFO: Got endpoints: latency-svc-qjjrl [749.628306ms] - Jul 29 16:10:45.436: INFO: Created: latency-svc-t88c6 - Jul 29 16:10:45.465: INFO: Got endpoints: latency-svc-bbkql [748.918457ms] - Jul 29 16:10:45.486: INFO: Created: latency-svc-gwzbp - Jul 29 16:10:45.516: INFO: Got endpoints: latency-svc-vbmwh [749.220419ms] - Jul 29 16:10:45.537: INFO: Created: latency-svc-mwmtn - Jul 29 16:10:45.566: INFO: Got endpoints: latency-svc-pt277 [745.355846ms] - Jul 29 16:10:45.587: INFO: Created: latency-svc-clqnr - Jul 29 16:10:45.618: INFO: Got endpoints: latency-svc-r5zxd [751.797494ms] - Jul 29 16:10:45.639: INFO: Created: latency-svc-7nk5w - Jul 29 16:10:45.666: INFO: Got endpoints: latency-svc-cgghn [750.193481ms] - Jul 29 16:10:45.684: INFO: Created: latency-svc-prjw7 - Jul 29 16:10:45.713: INFO: Got endpoints: latency-svc-t4mwr [738.716265ms] - Jul 29 16:10:45.732: INFO: Created: latency-svc-m572g - Jul 29 16:10:45.767: INFO: Got endpoints: latency-svc-pqrfl [724.719266ms] - Jul 29 16:10:45.790: INFO: Created: latency-svc-2ql4v - Jul 29 16:10:45.818: INFO: Got endpoints: latency-svc-4d5hg [740.395873ms] - Jul 29 16:10:45.837: INFO: Created: latency-svc-k4ksv - Jul 29 16:10:45.868: INFO: Got endpoints: latency-svc-266xk [756.011493ms] - Jul 29 16:10:45.885: INFO: Created: latency-svc-ltfnq - Jul 29 16:10:45.926: INFO: Got endpoints: latency-svc-xnw2z [757.923098ms] - Jul 29 16:10:45.946: INFO: Created: latency-svc-9ljsx - Jul 29 16:10:45.970: INFO: Got endpoints: latency-svc-n62ph [751.043164ms] - Jul 29 16:10:45.988: INFO: Created: latency-svc-mqn27 - Jul 29 16:10:46.016: INFO: Got endpoints: latency-svc-mdbcn [750.200784ms] - Jul 29 16:10:46.034: INFO: Created: latency-svc-nclxn - Jul 29 16:10:46.068: INFO: Got endpoints: latency-svc-kghsd [749.507975ms] - Jul 29 16:10:46.094: INFO: Created: latency-svc-vfzvr - Jul 29 16:10:46.117: INFO: Got endpoints: latency-svc-8mxtb [755.481854ms] - Jul 29 16:10:46.151: INFO: Created: latency-svc-8tddw - Jul 29 16:10:46.165: INFO: Got endpoints: latency-svc-t88c6 [748.457143ms] - Jul 29 16:10:46.185: INFO: Created: latency-svc-v5gjx - Jul 29 16:10:46.218: INFO: Got endpoints: latency-svc-gwzbp [752.538727ms] - Jul 29 16:10:46.237: INFO: Created: latency-svc-48zgg - Jul 29 16:10:46.265: INFO: Got endpoints: latency-svc-mwmtn [749.486816ms] - Jul 29 16:10:46.287: INFO: Created: latency-svc-rqspz - Jul 29 16:10:46.314: INFO: Got endpoints: latency-svc-clqnr [748.182573ms] - Jul 29 16:10:46.338: INFO: Created: latency-svc-55sl6 - Jul 29 16:10:46.366: INFO: Got endpoints: latency-svc-7nk5w [747.555815ms] - Jul 29 16:10:46.385: INFO: Created: latency-svc-gw9ds - Jul 29 16:10:46.418: INFO: Got endpoints: latency-svc-prjw7 [751.447302ms] - Jul 29 16:10:46.436: INFO: Created: latency-svc-gkmrw - Jul 29 16:10:46.464: INFO: Got endpoints: latency-svc-m572g [751.15606ms] - Jul 29 16:10:46.514: INFO: Got endpoints: latency-svc-2ql4v [744.202726ms] - Jul 29 16:10:46.578: INFO: Got endpoints: latency-svc-k4ksv [759.01028ms] - Jul 29 16:10:46.617: INFO: Got endpoints: latency-svc-ltfnq [748.211835ms] - Jul 29 16:10:46.664: INFO: Got endpoints: latency-svc-9ljsx [738.727944ms] - Jul 29 16:10:46.718: INFO: Got endpoints: latency-svc-mqn27 [746.701523ms] - Jul 29 
16:10:46.770: INFO: Got endpoints: latency-svc-nclxn [753.131953ms] - Jul 29 16:10:46.817: INFO: Got endpoints: latency-svc-vfzvr [748.648272ms] - Jul 29 16:10:46.874: INFO: Got endpoints: latency-svc-8tddw [756.674084ms] - Jul 29 16:10:46.916: INFO: Got endpoints: latency-svc-v5gjx [750.798794ms] - Jul 29 16:10:46.975: INFO: Got endpoints: latency-svc-48zgg [757.099886ms] - Jul 29 16:10:47.014: INFO: Got endpoints: latency-svc-rqspz [748.92676ms] - Jul 29 16:10:47.070: INFO: Got endpoints: latency-svc-55sl6 [755.922277ms] - Jul 29 16:10:47.113: INFO: Got endpoints: latency-svc-gw9ds [746.161845ms] - Jul 29 16:10:47.165: INFO: Got endpoints: latency-svc-gkmrw [746.749929ms] - Jul 29 16:10:47.165: INFO: Latencies: [45.293187ms 63.71407ms 95.000356ms 154.147783ms 162.173887ms 176.404163ms 181.154753ms 182.288048ms 190.26221ms 198.100372ms 203.087744ms 207.450406ms 207.50357ms 218.089412ms 218.539002ms 218.923806ms 223.759221ms 231.834458ms 236.682819ms 241.426418ms 244.954464ms 266.180365ms 274.950142ms 280.026035ms 281.276257ms 281.794835ms 283.811802ms 286.289579ms 286.7499ms 287.2344ms 290.060239ms 291.303258ms 294.996983ms 300.636657ms 301.122606ms 301.803559ms 301.920184ms 302.040457ms 304.841131ms 306.296716ms 306.641761ms 307.138114ms 311.716002ms 315.175295ms 318.786604ms 320.657876ms 326.761641ms 331.635634ms 337.015961ms 381.295511ms 427.375016ms 430.046281ms 475.564726ms 493.506093ms 540.582421ms 563.263459ms 563.629522ms 599.898692ms 658.279346ms 678.001074ms 684.784551ms 692.195817ms 695.318471ms 698.874506ms 710.63572ms 721.578222ms 723.773406ms 724.581012ms 724.719266ms 727.488471ms 727.509847ms 731.148754ms 731.694053ms 731.818111ms 738.518021ms 738.655909ms 738.716265ms 738.727944ms 739.369469ms 739.856394ms 740.395873ms 740.501062ms 740.781256ms 741.11959ms 741.215149ms 741.875604ms 742.46766ms 742.590705ms 742.741007ms 743.334805ms 743.686156ms 743.973448ms 744.202726ms 744.409124ms 744.819271ms 745.068402ms 745.315042ms 745.355846ms 745.730729ms 745.736755ms 745.860649ms 746.161845ms 746.279828ms 746.701523ms 746.738115ms 746.749929ms 746.809035ms 747.122685ms 747.150777ms 747.16723ms 747.343217ms 747.555815ms 747.591797ms 747.683294ms 747.747872ms 747.752197ms 747.772804ms 747.801709ms 747.891813ms 747.899981ms 748.182573ms 748.211835ms 748.262276ms 748.454077ms 748.457143ms 748.648272ms 748.690434ms 748.703636ms 748.853441ms 748.918457ms 748.926669ms 748.92676ms 749.220419ms 749.486816ms 749.507975ms 749.597859ms 749.606516ms 749.628306ms 749.74106ms 749.78388ms 749.993868ms 750.175416ms 750.193481ms 750.200784ms 750.274169ms 750.303709ms 750.399179ms 750.627728ms 750.664964ms 750.798794ms 750.936543ms 751.043164ms 751.15606ms 751.186251ms 751.447302ms 751.523012ms 751.553233ms 751.596498ms 751.797494ms 751.821217ms 752.1435ms 752.250376ms 752.518793ms 752.538727ms 752.801909ms 752.86481ms 752.923154ms 753.056859ms 753.131953ms 753.585293ms 753.634317ms 753.639619ms 754.05164ms 754.101072ms 754.305473ms 754.429243ms 754.607951ms 755.481854ms 755.706697ms 755.922277ms 756.011493ms 756.022737ms 756.349375ms 756.674084ms 757.064552ms 757.099886ms 757.923098ms 758.074435ms 759.01028ms 759.24957ms 759.778009ms 760.187244ms 760.307784ms 762.393572ms 766.044481ms 768.866375ms 770.559758ms 774.972227ms 784.732369ms 927.355533ms] - Jul 29 16:10:47.166: INFO: 50 %ile: 745.860649ms - Jul 29 16:10:47.166: INFO: 90 %ile: 756.011493ms - Jul 29 16:10:47.166: INFO: 99 %ile: 784.732369ms - Jul 29 16:10:47.166: INFO: Total sample count: 200 - [AfterEach] [sig-network] Service endpoints 
latency + [It] should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:47 + STEP: Creating configMap with name projected-configmap-test-volume-875393fe-8068-4b0c-a6d0-a52afdfaacc7 08/24/23 12:10:26.793 + STEP: Creating a pod to test consume configMaps 08/24/23 12:10:26.803 + Aug 24 12:10:26.818: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305" in namespace "projected-3007" to be "Succeeded or Failed" + Aug 24 12:10:26.825: INFO: Pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305": Phase="Pending", Reason="", readiness=false. Elapsed: 6.574206ms + Aug 24 12:10:28.833: INFO: Pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015224556s + Aug 24 12:10:30.832: INFO: Pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013571196s + STEP: Saw pod success 08/24/23 12:10:30.832 + Aug 24 12:10:30.832: INFO: Pod "pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305" satisfied condition "Succeeded or Failed" + Aug 24 12:10:30.837: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305 container agnhost-container: + STEP: delete the pod 08/24/23 12:10:30.849 + Aug 24 12:10:30.876: INFO: Waiting for pod pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305 to disappear + Aug 24 12:10:30.881: INFO: Pod pod-projected-configmaps-3c96dcae-0bd7-4179-ac7d-0f368cf99305 no longer exists + [AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:47.167: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Service endpoints latency + Aug 24 12:10:30.882: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Service endpoints latency + [DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Service endpoints latency + [DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 - STEP: Destroying namespace "svc-latency-3279" for this suite. 07/29/23 16:10:47.179 + STEP: Destroying namespace "projected-3007" for this suite. 
08/24/23 12:10:30.891
 << End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+SSSSSSSSS
------------------------------
-[sig-storage] CSIInlineVolumes
- should support ephemeral VolumeLifecycleMode in CSIDriver API [Conformance]
- test/e2e/storage/csi_inline.go:46
-[BeforeEach] [sig-storage] CSIInlineVolumes
+[sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ should support sysctls [MinimumKubeletVersion:1.21] [Conformance]
+ test/e2e/common/node/sysctl.go:77
+[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ test/e2e/common/node/sysctl.go:37
+[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:10:47.21
-Jul 29 16:10:47.210: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename csiinlinevolumes 07/29/23 16:10:47.214
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:47.25
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:47.26
-[BeforeEach] [sig-storage] CSIInlineVolumes
+STEP: Creating a kubernetes client 08/24/23 12:10:30.905
+Aug 24 12:10:30.905: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename sysctl 08/24/23 12:10:30.907
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:30.952
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:30.959
+[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/metrics/init/init.go:31
-[It] should support ephemeral VolumeLifecycleMode in CSIDriver API [Conformance]
- test/e2e/storage/csi_inline.go:46
-STEP: creating 07/29/23 16:10:47.268
-STEP: getting 07/29/23 16:10:47.299
-STEP: listing 07/29/23 16:10:47.318
-STEP: deleting 07/29/23 16:10:47.335
-[AfterEach] [sig-storage] CSIInlineVolumes
+[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ test/e2e/common/node/sysctl.go:67
+[It] should support sysctls [MinimumKubeletVersion:1.21] [Conformance]
+ test/e2e/common/node/sysctl.go:77
+STEP: Creating a pod with the kernel.shm_rmid_forced sysctl 08/24/23 12:10:30.966
+STEP: Watching for error events or started pod 08/24/23 12:10:30.993
+STEP: Waiting for pod completion 08/24/23 12:10:33.002
+Aug 24 12:10:33.002: INFO: Waiting up to 3m0s for pod "sysctl-c799d591-a6fc-4a5c-9f38-2ad632f76aee" in namespace "sysctl-347" to be "completed"
+Aug 24 12:10:33.008: INFO: Pod "sysctl-c799d591-a6fc-4a5c-9f38-2ad632f76aee": Phase="Pending", Reason="", readiness=false. Elapsed: 5.797782ms
+Aug 24 12:10:35.017: INFO: Pod "sysctl-c799d591-a6fc-4a5c-9f38-2ad632f76aee": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014589688s
+Aug 24 12:10:35.017: INFO: Pod "sysctl-c799d591-a6fc-4a5c-9f38-2ad632f76aee" satisfied condition "completed"
+STEP: Checking that the pod succeeded 08/24/23 12:10:35.038
+STEP: Getting logs from the pod 08/24/23 12:10:35.043
+STEP: Checking that the sysctl is actually updated 08/24/23 12:10:35.06
+[AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:10:47.373: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes
+Aug 24 12:10:35.061: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes
+[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes
+[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 tear down framework | framework.go:193
-STEP: Destroying namespace "csiinlinevolumes-3440" for this suite. 07/29/23 16:10:47.383
+STEP: Destroying namespace "sysctl-347" for this suite. 08/24/23 12:10:35.071
------------------------------
-• [0.190 seconds]
-[sig-storage] CSIInlineVolumes
-test/e2e/storage/utils/framework.go:23
- should support ephemeral VolumeLifecycleMode in CSIDriver API [Conformance]
- test/e2e/storage/csi_inline.go:46
+• [4.187 seconds]
+[sig-node] Sysctls [LinuxOnly] [NodeConformance]
+test/e2e/common/node/framework.go:23
+ should support sysctls [MinimumKubeletVersion:1.21] [Conformance]
+ test/e2e/common/node/sysctl.go:77
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-storage] CSIInlineVolumes
+ [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ test/e2e/common/node/sysctl.go:37
+ [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:10:47.21
- Jul 29 16:10:47.210: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename csiinlinevolumes 07/29/23 16:10:47.214
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:47.25
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:47.26
- [BeforeEach] [sig-storage] CSIInlineVolumes
+ STEP: Creating a kubernetes client 08/24/23 12:10:30.905
+ Aug 24 12:10:30.905: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename sysctl 08/24/23 12:10:30.907
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:30.952
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:30.959
+ [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/metrics/init/init.go:31
- [It] should support ephemeral VolumeLifecycleMode in CSIDriver API [Conformance]
- test/e2e/storage/csi_inline.go:46
- STEP: creating 07/29/23 16:10:47.268
- STEP: getting 07/29/23 16:10:47.299
- STEP: listing 07/29/23 16:10:47.318
- STEP: deleting 07/29/23 16:10:47.335
- [AfterEach] [sig-storage] CSIInlineVolumes
+ [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ test/e2e/common/node/sysctl.go:67
+ [It] should support sysctls [MinimumKubeletVersion:1.21] [Conformance]
+ test/e2e/common/node/sysctl.go:77
+ STEP: Creating a pod with the kernel.shm_rmid_forced sysctl 08/24/23 12:10:30.966
+ STEP: Watching for error events or started pod 08/24/23 12:10:30.993
+ STEP: Waiting for pod completion 08/24/23 12:10:33.002
+ Aug 24 12:10:33.002: INFO: Waiting up to 3m0s for pod "sysctl-c799d591-a6fc-4a5c-9f38-2ad632f76aee" in namespace "sysctl-347" to be "completed"
+ Aug 24 12:10:33.008: INFO: Pod "sysctl-c799d591-a6fc-4a5c-9f38-2ad632f76aee": Phase="Pending", Reason="", readiness=false. Elapsed: 5.797782ms
+ Aug 24 12:10:35.017: INFO: Pod "sysctl-c799d591-a6fc-4a5c-9f38-2ad632f76aee": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014589688s
+ Aug 24 12:10:35.017: INFO: Pod "sysctl-c799d591-a6fc-4a5c-9f38-2ad632f76aee" satisfied condition "completed"
+ STEP: Checking that the pod succeeded 08/24/23 12:10:35.038
+ STEP: Getting logs from the pod 08/24/23 12:10:35.043
+ STEP: Checking that the sysctl is actually updated 08/24/23 12:10:35.06
+ [AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:10:47.373: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes
+ Aug 24 12:10:35.061: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes
+ [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes
+ [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 tear down framework | framework.go:193
- STEP: Destroying namespace "csiinlinevolumes-3440" for this suite. 07/29/23 16:10:47.383
+ STEP: Destroying namespace "sysctl-347" for this suite. 
08/24/23 12:10:35.071 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-storage] ConfigMap +[sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:57 -[BeforeEach] [sig-storage] ConfigMap + test/e2e/common/storage/projected_configmap.go:57 +[BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:10:47.421 -Jul 29 16:10:47.421: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:10:47.423 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:47.464 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:47.467 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 12:10:35.094 +Aug 24 12:10:35.094: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:10:35.097 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:35.134 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:35.141 +[BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 [It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:57 -STEP: Creating configMap with name configmap-test-volume-e709e90d-17b2-4b2c-beec-4e185207c436 07/29/23 16:10:47.47 -STEP: Creating a pod to test consume configMaps 07/29/23 16:10:47.48 -Jul 29 16:10:47.498: INFO: Waiting up to 5m0s for pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a" in namespace "configmap-1656" to be "Succeeded or Failed" -Jul 29 16:10:47.505: INFO: Pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.748236ms -Jul 29 16:10:49.513: INFO: Pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014325733s -Jul 29 16:10:51.517: INFO: Pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018353231s -STEP: Saw pod success 07/29/23 16:10:51.517 -Jul 29 16:10:51.519: INFO: Pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a" satisfied condition "Succeeded or Failed" -Jul 29 16:10:51.527: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a container agnhost-container: -STEP: delete the pod 07/29/23 16:10:51.542 -Jul 29 16:10:51.574: INFO: Waiting for pod pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a to disappear -Jul 29 16:10:51.582: INFO: Pod pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a no longer exists -[AfterEach] [sig-storage] ConfigMap + test/e2e/common/storage/projected_configmap.go:57 +STEP: Creating configMap with name projected-configmap-test-volume-9a830cdb-b963-4e4f-a541-e82895b71e18 08/24/23 12:10:35.147 +STEP: Creating a pod to test consume configMaps 08/24/23 12:10:35.156 +Aug 24 12:10:35.173: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7" in namespace "projected-8096" to be "Succeeded or Failed" +Aug 24 12:10:35.180: INFO: Pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7": Phase="Pending", Reason="", readiness=false. Elapsed: 6.677733ms +Aug 24 12:10:37.189: INFO: Pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015507401s +Aug 24 12:10:39.187: INFO: Pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.014003534s +STEP: Saw pod success 08/24/23 12:10:39.188 +Aug 24 12:10:39.188: INFO: Pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7" satisfied condition "Succeeded or Failed" +Aug 24 12:10:39.198: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7 container agnhost-container: +STEP: delete the pod 08/24/23 12:10:39.215 +Aug 24 12:10:39.245: INFO: Waiting for pod pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7 to disappear +Aug 24 12:10:39.253: INFO: Pod pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7 no longer exists +[AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:10:51.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 12:10:39.253: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-1656" for this suite. 07/29/23 16:10:51.593 +STEP: Destroying namespace "projected-8096" for this suite. 
08/24/23 12:10:39.262 ------------------------------ -• [4.185 seconds] -[sig-storage] ConfigMap +• [4.182 seconds] +[sig-storage] Projected configMap test/e2e/common/storage/framework.go:23 should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:57 + test/e2e/common/storage/projected_configmap.go:57 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:10:47.421 - Jul 29 16:10:47.421: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:10:47.423 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:47.464 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:47.467 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 12:10:35.094 + Aug 24 12:10:35.094: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:10:35.097 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:35.134 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:35.141 + [BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 [It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:57 - STEP: Creating configMap with name configmap-test-volume-e709e90d-17b2-4b2c-beec-4e185207c436 07/29/23 16:10:47.47 - STEP: Creating a pod to test consume configMaps 07/29/23 16:10:47.48 - Jul 29 16:10:47.498: INFO: Waiting up to 5m0s for pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a" in namespace "configmap-1656" to be "Succeeded or Failed" - Jul 29 16:10:47.505: INFO: Pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.748236ms - Jul 29 16:10:49.513: INFO: Pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014325733s - Jul 29 16:10:51.517: INFO: Pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018353231s - STEP: Saw pod success 07/29/23 16:10:51.517 - Jul 29 16:10:51.519: INFO: Pod "pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a" satisfied condition "Succeeded or Failed" - Jul 29 16:10:51.527: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a container agnhost-container: - STEP: delete the pod 07/29/23 16:10:51.542 - Jul 29 16:10:51.574: INFO: Waiting for pod pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a to disappear - Jul 29 16:10:51.582: INFO: Pod pod-configmaps-dd015140-a605-4a9c-afd3-9b912664680a no longer exists - [AfterEach] [sig-storage] ConfigMap + test/e2e/common/storage/projected_configmap.go:57 + STEP: Creating configMap with name projected-configmap-test-volume-9a830cdb-b963-4e4f-a541-e82895b71e18 08/24/23 12:10:35.147 + STEP: Creating a pod to test consume configMaps 08/24/23 12:10:35.156 + Aug 24 12:10:35.173: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7" in namespace "projected-8096" to be "Succeeded or Failed" + Aug 24 12:10:35.180: INFO: Pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7": Phase="Pending", Reason="", readiness=false. Elapsed: 6.677733ms + Aug 24 12:10:37.189: INFO: Pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015507401s + Aug 24 12:10:39.187: INFO: Pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.014003534s + STEP: Saw pod success 08/24/23 12:10:39.188 + Aug 24 12:10:39.188: INFO: Pod "pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7" satisfied condition "Succeeded or Failed" + Aug 24 12:10:39.198: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7 container agnhost-container: + STEP: delete the pod 08/24/23 12:10:39.215 + Aug 24 12:10:39.245: INFO: Waiting for pod pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7 to disappear + Aug 24 12:10:39.253: INFO: Pod pod-projected-configmaps-75d882e1-3c85-4fc7-9288-d1df276836b7 no longer exists + [AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:10:51.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 12:10:39.253: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-1656" for this suite. 07/29/23 16:10:51.593 + STEP: Destroying namespace "projected-8096" for this suite. 
08/24/23 12:10:39.262
 << End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSS
+SSSSSSSSSSSS
------------------------------
 [sig-node] Probing container
- with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
- test/e2e/common/node/container_probe.go:108
+ should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+ test/e2e/common/node/container_probe.go:169
 [BeforeEach] [sig-node] Probing container
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:10:51.624
-Jul 29 16:10:51.624: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename container-probe 07/29/23 16:10:51.628
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:51.661
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:51.666
+STEP: Creating a kubernetes client 08/24/23 12:10:39.28
+Aug 24 12:10:39.280: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename container-probe 08/24/23 12:10:39.282
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:39.312
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:39.317
 [BeforeEach] [sig-node] Probing container
 test/e2e/framework/metrics/init/init.go:31
 [BeforeEach] [sig-node] Probing container
 test/e2e/common/node/container_probe.go:63
-[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
- test/e2e/common/node/container_probe.go:108
+[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+ test/e2e/common/node/container_probe.go:169
+STEP: Creating pod liveness-927c9f08-887b-45a0-bda2-115bcb16d40a in namespace container-probe-165 08/24/23 12:10:39.323
+Aug 24 12:10:39.337: INFO: Waiting up to 5m0s for pod "liveness-927c9f08-887b-45a0-bda2-115bcb16d40a" in namespace "container-probe-165" to be "not pending"
+Aug 24 12:10:39.343: INFO: Pod "liveness-927c9f08-887b-45a0-bda2-115bcb16d40a": Phase="Pending", Reason="", readiness=false. Elapsed: 5.107688ms
+Aug 24 12:10:41.349: INFO: Pod "liveness-927c9f08-887b-45a0-bda2-115bcb16d40a": Phase="Running", Reason="", readiness=true. Elapsed: 2.011671102s
+Aug 24 12:10:41.349: INFO: Pod "liveness-927c9f08-887b-45a0-bda2-115bcb16d40a" satisfied condition "not pending"
+Aug 24 12:10:41.349: INFO: Started pod liveness-927c9f08-887b-45a0-bda2-115bcb16d40a in namespace container-probe-165
+STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 12:10:41.349
+Aug 24 12:10:41.355: INFO: Initial restart count of pod liveness-927c9f08-887b-45a0-bda2-115bcb16d40a is 0
+Aug 24 12:11:01.513: INFO: Restart count of pod container-probe-165/liveness-927c9f08-887b-45a0-bda2-115bcb16d40a is now 1 (20.157583514s elapsed)
+STEP: deleting the pod 08/24/23 12:11:01.513
 [AfterEach] [sig-node] Probing container
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:11:51.699: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+Aug 24 12:11:01.543: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
 [DeferCleanup (Each)] [sig-node] Probing container
 test/e2e/framework/metrics/init/init.go:33
 [DeferCleanup (Each)] [sig-node] Probing container
 dump namespaces | framework.go:196
 [DeferCleanup (Each)] [sig-node] Probing container
 tear down framework | framework.go:193
-STEP: Destroying namespace "container-probe-2369" for this suite. 07/29/23 16:11:51.706
+STEP: Destroying namespace "container-probe-165" for this suite. 08/24/23 12:11:01.551
------------------------------
-• [SLOW TEST] [60.094 seconds]
+• [SLOW TEST] [22.284 seconds]
 [sig-node] Probing container
 test/e2e/common/node/framework.go:23
- with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
- test/e2e/common/node/container_probe.go:108
+ should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+ test/e2e/common/node/container_probe.go:169
 Begin Captured GinkgoWriter Output >>
 [BeforeEach] [sig-node] Probing container
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:10:51.624
- Jul 29 16:10:51.624: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename container-probe 07/29/23 16:10:51.628
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:10:51.661
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:10:51.666
+ STEP: Creating a kubernetes client 08/24/23 12:10:39.28
+ Aug 24 12:10:39.280: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename container-probe 08/24/23 12:10:39.282
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:10:39.312
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:10:39.317
 [BeforeEach] [sig-node] Probing container
 test/e2e/framework/metrics/init/init.go:31
 [BeforeEach] [sig-node] Probing container
 test/e2e/common/node/container_probe.go:63
- [It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
- test/e2e/common/node/container_probe.go:108
+ [It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+ test/e2e/common/node/container_probe.go:169
+ STEP: Creating pod liveness-927c9f08-887b-45a0-bda2-115bcb16d40a in namespace container-probe-165 08/24/23 12:10:39.323
+ Aug 24 12:10:39.337: INFO: Waiting up to 5m0s for pod "liveness-927c9f08-887b-45a0-bda2-115bcb16d40a" in namespace "container-probe-165" to be "not pending"
+ Aug 24 12:10:39.343: INFO: Pod 
"liveness-927c9f08-887b-45a0-bda2-115bcb16d40a": Phase="Pending", Reason="", readiness=false. Elapsed: 5.107688ms + Aug 24 12:10:41.349: INFO: Pod "liveness-927c9f08-887b-45a0-bda2-115bcb16d40a": Phase="Running", Reason="", readiness=true. Elapsed: 2.011671102s + Aug 24 12:10:41.349: INFO: Pod "liveness-927c9f08-887b-45a0-bda2-115bcb16d40a" satisfied condition "not pending" + Aug 24 12:10:41.349: INFO: Started pod liveness-927c9f08-887b-45a0-bda2-115bcb16d40a in namespace container-probe-165 + STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 12:10:41.349 + Aug 24 12:10:41.355: INFO: Initial restart count of pod liveness-927c9f08-887b-45a0-bda2-115bcb16d40a is 0 + Aug 24 12:11:01.513: INFO: Restart count of pod container-probe-165/liveness-927c9f08-887b-45a0-bda2-115bcb16d40a is now 1 (20.157583514s elapsed) + STEP: deleting the pod 08/24/23 12:11:01.513 [AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 - Jul 29 16:11:51.699: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:11:01.543: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 - STEP: Destroying namespace "container-probe-2369" for this suite. 07/29/23 16:11:51.706 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-node] Pods - should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:398 -[BeforeEach] [sig-node] Pods - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:11:51.72 -Jul 29 16:11:51.720: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 16:11:51.726 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:11:51.757 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:11:51.761 -[BeforeEach] [sig-node] Pods - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:398 -STEP: creating the pod 07/29/23 16:11:51.765 -STEP: submitting the pod to kubernetes 07/29/23 16:11:51.766 -Jul 29 16:11:51.783: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" in namespace "pods-3795" to be "running and ready" -Jul 29 16:11:51.791: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Pending", Reason="", readiness=false. Elapsed: 7.290097ms -Jul 29 16:11:51.791: INFO: The phase of Pod pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:11:53.800: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.016464366s -Jul 29 16:11:53.800: INFO: The phase of Pod pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6 is Running (Ready = true) -Jul 29 16:11:53.800: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" satisfied condition "running and ready" -STEP: verifying the pod is in kubernetes 07/29/23 16:11:53.804 -STEP: updating the pod 07/29/23 16:11:53.81 -Jul 29 16:11:54.337: INFO: Successfully updated pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" -Jul 29 16:11:54.337: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" in namespace "pods-3795" to be "terminated with reason DeadlineExceeded" -Jul 29 16:11:54.343: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Running", Reason="", readiness=true. Elapsed: 5.780751ms -Jul 29 16:11:56.348: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Running", Reason="", readiness=true. Elapsed: 2.011407228s -Jul 29 16:11:58.351: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Running", Reason="", readiness=false. Elapsed: 4.014246592s -Jul 29 16:12:00.354: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 6.017087868s -Jul 29 16:12:00.354: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" satisfied condition "terminated with reason DeadlineExceeded" -[AfterEach] [sig-node] Pods - test/e2e/framework/node/init/init.go:32 -Jul 29 16:12:00.354: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods - tear down framework | framework.go:193 -STEP: Destroying namespace "pods-3795" for this suite. 07/29/23 16:12:00.364 ------------------------------- -• [SLOW TEST] [8.655 seconds] -[sig-node] Pods -test/e2e/common/node/framework.go:23 - should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:398 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:11:51.72 - Jul 29 16:11:51.720: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 16:11:51.726 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:11:51.757 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:11:51.761 - [BeforeEach] [sig-node] Pods - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:398 - STEP: creating the pod 07/29/23 16:11:51.765 - STEP: submitting the pod to kubernetes 07/29/23 16:11:51.766 - Jul 29 16:11:51.783: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" in namespace "pods-3795" to be "running and ready" - Jul 29 16:11:51.791: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Pending", Reason="", readiness=false. 
Elapsed: 7.290097ms - Jul 29 16:11:51.791: INFO: The phase of Pod pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:11:53.800: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Running", Reason="", readiness=true. Elapsed: 2.016464366s - Jul 29 16:11:53.800: INFO: The phase of Pod pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6 is Running (Ready = true) - Jul 29 16:11:53.800: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" satisfied condition "running and ready" - STEP: verifying the pod is in kubernetes 07/29/23 16:11:53.804 - STEP: updating the pod 07/29/23 16:11:53.81 - Jul 29 16:11:54.337: INFO: Successfully updated pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" - Jul 29 16:11:54.337: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" in namespace "pods-3795" to be "terminated with reason DeadlineExceeded" - Jul 29 16:11:54.343: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Running", Reason="", readiness=true. Elapsed: 5.780751ms - Jul 29 16:11:56.348: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Running", Reason="", readiness=true. Elapsed: 2.011407228s - Jul 29 16:11:58.351: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Running", Reason="", readiness=false. Elapsed: 4.014246592s - Jul 29 16:12:00.354: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 6.017087868s - Jul 29 16:12:00.354: INFO: Pod "pod-update-activedeadlineseconds-1b256536-d25d-432f-a3a8-0e33ab0ea3a6" satisfied condition "terminated with reason DeadlineExceeded" - [AfterEach] [sig-node] Pods - test/e2e/framework/node/init/init.go:32 - Jul 29 16:12:00.354: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods - tear down framework | framework.go:193 - STEP: Destroying namespace "pods-3795" for this suite. 
07/29/23 16:12:00.364 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for multiple CRDs of different groups [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:276 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:12:00.38 -Jul 29 16:12:00.380: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:12:00.382 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:12:00.414 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:12:00.418 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:31 -[It] works for multiple CRDs of different groups [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:276 -STEP: CRs in different groups (two CRDs) show up in OpenAPI documentation 07/29/23 16:12:00.423 -Jul 29 16:12:00.425: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:12:02.858: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:32 -Jul 29 16:12:12.486: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - tear down framework | framework.go:193 -STEP: Destroying namespace "crd-publish-openapi-2853" for this suite. 
07/29/23 16:12:12.511 ------------------------------- -• [SLOW TEST] [12.149 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - works for multiple CRDs of different groups [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:276 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:12:00.38 - Jul 29 16:12:00.380: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:12:00.382 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:12:00.414 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:12:00.418 - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:31 - [It] works for multiple CRDs of different groups [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:276 - STEP: CRs in different groups (two CRDs) show up in OpenAPI documentation 07/29/23 16:12:00.423 - Jul 29 16:12:00.425: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:12:02.858: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/node/init/init.go:32 - Jul 29 16:12:12.486: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - tear down framework | framework.go:193 - STEP: Destroying namespace "crd-publish-openapi-2853" for this suite. 07/29/23 16:12:12.511 + STEP: Destroying namespace "container-probe-165" for this suite. 
08/24/23 12:11:01.551 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +S ------------------------------ -[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] - should perform canary updates and phased rolling updates of template modifications [Conformance] - test/e2e/apps/statefulset.go:317 -[BeforeEach] [sig-apps] StatefulSet +[sig-apps] CronJob + should schedule multiple jobs concurrently [Conformance] + test/e2e/apps/cronjob.go:69 +[BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:12:12.557 -Jul 29 16:12:12.557: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename statefulset 07/29/23 16:12:12.56 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:12:12.596 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:12:12.601 -[BeforeEach] [sig-apps] StatefulSet +STEP: Creating a kubernetes client 08/24/23 12:11:01.565 +Aug 24 12:11:01.565: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename cronjob 08/24/23 12:11:01.568 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:11:01.603 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:11:01.606 +[BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 -[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 -STEP: Creating service test in namespace statefulset-3756 07/29/23 16:12:12.606 -[It] should perform canary updates and phased rolling updates of template modifications [Conformance] - test/e2e/apps/statefulset.go:317 -STEP: Creating a new StatefulSet 07/29/23 16:12:12.619 -Jul 29 16:12:12.635: INFO: Found 0 stateful pods, waiting for 3 -Jul 29 16:12:22.645: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 16:12:22.645: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 16:12:22.646: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true -STEP: Updating stateful set template: update image from registry.k8s.io/e2e-test-images/httpd:2.4.38-4 to registry.k8s.io/e2e-test-images/httpd:2.4.39-4 07/29/23 16:12:22.665 -Jul 29 16:12:22.699: INFO: Updating stateful set ss2 -STEP: Creating a new revision 07/29/23 16:12:22.699 -STEP: Not applying an update when the partition is greater than the number of replicas 07/29/23 16:12:32.728 -STEP: Performing a canary update 07/29/23 16:12:32.728 -Jul 29 16:12:32.758: INFO: Updating stateful set ss2 -Jul 29 16:12:32.771: INFO: Waiting for Pod statefulset-3756/ss2-2 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 -STEP: Restoring Pods to the correct revision when they are deleted 07/29/23 16:12:42.787 -Jul 29 16:12:42.907: INFO: Found 1 stateful pods, waiting for 3 -Jul 29 16:12:52.921: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 16:12:52.921: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 16:12:52.921: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true 
-STEP: Performing a phased rolling update 07/29/23 16:12:52.934 -Jul 29 16:12:52.965: INFO: Updating stateful set ss2 -Jul 29 16:12:52.978: INFO: Waiting for Pod statefulset-3756/ss2-1 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 -Jul 29 16:13:03.019: INFO: Updating stateful set ss2 -Jul 29 16:13:03.031: INFO: Waiting for StatefulSet statefulset-3756/ss2 to complete update -Jul 29 16:13:03.031: INFO: Waiting for Pod statefulset-3756/ss2-0 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 -[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 -Jul 29 16:13:13.053: INFO: Deleting all statefulset in ns statefulset-3756 -Jul 29 16:13:13.061: INFO: Scaling statefulset ss2 to 0 -Jul 29 16:13:23.095: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 16:13:23.100: INFO: Deleting statefulset ss2 -[AfterEach] [sig-apps] StatefulSet +[It] should schedule multiple jobs concurrently [Conformance] + test/e2e/apps/cronjob.go:69 +STEP: Creating a cronjob 08/24/23 12:11:01.611 +STEP: Ensuring more than one job is running at a time 08/24/23 12:11:01.623 +STEP: Ensuring at least two running jobs exists by listing jobs explicitly 08/24/23 12:13:01.632 +STEP: Removing cronjob 08/24/23 12:13:01.64 +[AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 -Jul 29 16:13:23.129: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] StatefulSet +Aug 24 12:13:01.656: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 -STEP: Destroying namespace "statefulset-3756" for this suite. 07/29/23 16:13:23.139 +STEP: Destroying namespace "cronjob-2294" for this suite. 
08/24/23 12:13:01.667 ------------------------------ -• [SLOW TEST] [70.595 seconds] -[sig-apps] StatefulSet +• [SLOW TEST] [120.114 seconds] +[sig-apps] CronJob test/e2e/apps/framework.go:23 - Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:103 - should perform canary updates and phased rolling updates of template modifications [Conformance] - test/e2e/apps/statefulset.go:317 + should schedule multiple jobs concurrently [Conformance] + test/e2e/apps/cronjob.go:69 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] StatefulSet + [BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:12:12.557 - Jul 29 16:12:12.557: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename statefulset 07/29/23 16:12:12.56 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:12:12.596 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:12:12.601 - [BeforeEach] [sig-apps] StatefulSet + STEP: Creating a kubernetes client 08/24/23 12:11:01.565 + Aug 24 12:11:01.565: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename cronjob 08/24/23 12:11:01.568 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:11:01.603 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:11:01.606 + [BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 - [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 - STEP: Creating service test in namespace statefulset-3756 07/29/23 16:12:12.606 - [It] should perform canary updates and phased rolling updates of template modifications [Conformance] - test/e2e/apps/statefulset.go:317 - STEP: Creating a new StatefulSet 07/29/23 16:12:12.619 - Jul 29 16:12:12.635: INFO: Found 0 stateful pods, waiting for 3 - Jul 29 16:12:22.645: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 16:12:22.645: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 16:12:22.646: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true - STEP: Updating stateful set template: update image from registry.k8s.io/e2e-test-images/httpd:2.4.38-4 to registry.k8s.io/e2e-test-images/httpd:2.4.39-4 07/29/23 16:12:22.665 - Jul 29 16:12:22.699: INFO: Updating stateful set ss2 - STEP: Creating a new revision 07/29/23 16:12:22.699 - STEP: Not applying an update when the partition is greater than the number of replicas 07/29/23 16:12:32.728 - STEP: Performing a canary update 07/29/23 16:12:32.728 - Jul 29 16:12:32.758: INFO: Updating stateful set ss2 - Jul 29 16:12:32.771: INFO: Waiting for Pod statefulset-3756/ss2-2 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 - STEP: Restoring Pods to the correct revision when they are deleted 07/29/23 16:12:42.787 - Jul 29 16:12:42.907: INFO: Found 1 stateful pods, waiting for 3 - Jul 29 16:12:52.921: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 16:12:52.921: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 16:12:52.921: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - 
Ready=true - STEP: Performing a phased rolling update 07/29/23 16:12:52.934 - Jul 29 16:12:52.965: INFO: Updating stateful set ss2 - Jul 29 16:12:52.978: INFO: Waiting for Pod statefulset-3756/ss2-1 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 - Jul 29 16:13:03.019: INFO: Updating stateful set ss2 - Jul 29 16:13:03.031: INFO: Waiting for StatefulSet statefulset-3756/ss2 to complete update - Jul 29 16:13:03.031: INFO: Waiting for Pod statefulset-3756/ss2-0 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 - [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 - Jul 29 16:13:13.053: INFO: Deleting all statefulset in ns statefulset-3756 - Jul 29 16:13:13.061: INFO: Scaling statefulset ss2 to 0 - Jul 29 16:13:23.095: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 16:13:23.100: INFO: Deleting statefulset ss2 - [AfterEach] [sig-apps] StatefulSet + [It] should schedule multiple jobs concurrently [Conformance] + test/e2e/apps/cronjob.go:69 + STEP: Creating a cronjob 08/24/23 12:11:01.611 + STEP: Ensuring more than one job is running at a time 08/24/23 12:11:01.623 + STEP: Ensuring at least two running jobs exists by listing jobs explicitly 08/24/23 12:13:01.632 + STEP: Removing cronjob 08/24/23 12:13:01.64 + [AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 - Jul 29 16:13:23.129: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] StatefulSet + Aug 24 12:13:01.656: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 - STEP: Destroying namespace "statefulset-3756" for this suite. 07/29/23 16:13:23.139 + STEP: Destroying namespace "cronjob-2294" for this suite. 08/24/23 12:13:01.667 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:107 -[BeforeEach] [sig-storage] EmptyDir volumes +[sig-api-machinery] ResourceQuota + should be able to update and delete ResourceQuota. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:884 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:13:23.155 -Jul 29 16:13:23.155: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 16:13:23.16 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:13:23.196 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:13:23.201 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 12:13:01.687 +Aug 24 12:13:01.688: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:13:01.693 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:01.743 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:01.749 +[BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:107 -STEP: Creating a pod to test emptydir 0666 on tmpfs 07/29/23 16:13:23.208 -Jul 29 16:13:23.221: INFO: Waiting up to 5m0s for pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff" in namespace "emptydir-21" to be "Succeeded or Failed" -Jul 29 16:13:23.227: INFO: Pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff": Phase="Pending", Reason="", readiness=false. Elapsed: 5.473233ms -Jul 29 16:13:25.233: INFO: Pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011886465s -Jul 29 16:13:27.236: INFO: Pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015244812s -STEP: Saw pod success 07/29/23 16:13:27.237 -Jul 29 16:13:27.237: INFO: Pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff" satisfied condition "Succeeded or Failed" -Jul 29 16:13:27.243: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff container test-container: -STEP: delete the pod 07/29/23 16:13:27.279 -Jul 29 16:13:27.301: INFO: Waiting for pod pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff to disappear -Jul 29 16:13:27.306: INFO: Pod pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[It] should be able to update and delete ResourceQuota. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:884 +STEP: Creating a ResourceQuota 08/24/23 12:13:01.763 +STEP: Getting a ResourceQuota 08/24/23 12:13:01.785 +STEP: Updating a ResourceQuota 08/24/23 12:13:01.792 +STEP: Verifying a ResourceQuota was modified 08/24/23 12:13:01.804 +STEP: Deleting a ResourceQuota 08/24/23 12:13:01.812 +STEP: Verifying the deleted ResourceQuota 08/24/23 12:13:01.824 +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:13:27.306: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +Aug 24 12:13:01.829: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-21" for this suite. 07/29/23 16:13:27.318 +STEP: Destroying namespace "resourcequota-3" for this suite. 08/24/23 12:13:01.837 ------------------------------ -• [4.178 seconds] -[sig-storage] EmptyDir volumes -test/e2e/common/storage/framework.go:23 - should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:107 +• [0.167 seconds] +[sig-api-machinery] ResourceQuota +test/e2e/apimachinery/framework.go:23 + should be able to update and delete ResourceQuota. [Conformance] + test/e2e/apimachinery/resource_quota.go:884 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:13:23.155 - Jul 29 16:13:23.155: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 16:13:23.16 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:13:23.196 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:13:23.201 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 12:13:01.687 + Aug 24 12:13:01.688: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:13:01.693 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:01.743 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:01.749 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:107 - STEP: Creating a pod to test emptydir 0666 on tmpfs 07/29/23 16:13:23.208 - Jul 29 16:13:23.221: INFO: Waiting up to 5m0s for pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff" in namespace "emptydir-21" to be "Succeeded or Failed" - Jul 29 16:13:23.227: INFO: Pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff": Phase="Pending", Reason="", readiness=false. Elapsed: 5.473233ms - Jul 29 16:13:25.233: INFO: Pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.011886465s - Jul 29 16:13:27.236: INFO: Pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015244812s - STEP: Saw pod success 07/29/23 16:13:27.237 - Jul 29 16:13:27.237: INFO: Pod "pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff" satisfied condition "Succeeded or Failed" - Jul 29 16:13:27.243: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff container test-container: - STEP: delete the pod 07/29/23 16:13:27.279 - Jul 29 16:13:27.301: INFO: Waiting for pod pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff to disappear - Jul 29 16:13:27.306: INFO: Pod pod-b5bd70a0-dd18-4fd0-a52d-076a0e9881ff no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [It] should be able to update and delete ResourceQuota. [Conformance] + test/e2e/apimachinery/resource_quota.go:884 + STEP: Creating a ResourceQuota 08/24/23 12:13:01.763 + STEP: Getting a ResourceQuota 08/24/23 12:13:01.785 + STEP: Updating a ResourceQuota 08/24/23 12:13:01.792 + STEP: Verifying a ResourceQuota was modified 08/24/23 12:13:01.804 + STEP: Deleting a ResourceQuota 08/24/23 12:13:01.812 + STEP: Verifying the deleted ResourceQuota 08/24/23 12:13:01.824 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:13:27.306: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 12:13:01.829: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-21" for this suite. 07/29/23 16:13:27.318 + STEP: Destroying namespace "resourcequota-3" for this suite. 
08/24/23 12:13:01.837 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-storage] Subpath Atomic writer volumes - should support subpaths with secret pod [Conformance] - test/e2e/storage/subpath.go:60 -[BeforeEach] [sig-storage] Subpath +[sig-network] Services + should be able to change the type from ClusterIP to ExternalName [Conformance] + test/e2e/network/service.go:1515 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:13:27.337 -Jul 29 16:13:27.338: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename subpath 07/29/23 16:13:27.339 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:13:27.374 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:13:27.379 -[BeforeEach] [sig-storage] Subpath +STEP: Creating a kubernetes client 08/24/23 12:13:01.859 +Aug 24 12:13:01.860: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 12:13:01.864 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:01.902 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:01.907 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 -STEP: Setting up data 07/29/23 16:13:27.383 -[It] should support subpaths with secret pod [Conformance] - test/e2e/storage/subpath.go:60 -STEP: Creating pod pod-subpath-test-secret-v66n 07/29/23 16:13:27.397 -STEP: Creating a pod to test atomic-volume-subpath 07/29/23 16:13:27.398 -Jul 29 16:13:27.414: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-v66n" in namespace "subpath-8690" to be "Succeeded or Failed" -Jul 29 16:13:27.419: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Pending", Reason="", readiness=false. Elapsed: 5.039046ms -Jul 29 16:13:29.426: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 2.0118533s -Jul 29 16:13:31.428: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 4.013657988s -Jul 29 16:13:33.431: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 6.016887571s -Jul 29 16:13:35.428: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 8.013913318s -Jul 29 16:13:37.427: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 10.012957798s -Jul 29 16:13:39.428: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 12.013824444s -Jul 29 16:13:41.427: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 14.012890811s -Jul 29 16:13:43.427: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 16.012737681s -Jul 29 16:13:45.428: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 18.014346318s -Jul 29 16:13:47.426: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 20.012579894s -Jul 29 16:13:49.425: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=false. 
Elapsed: 22.010597687s -Jul 29 16:13:51.427: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.012988721s -STEP: Saw pod success 07/29/23 16:13:51.427 -Jul 29 16:13:51.427: INFO: Pod "pod-subpath-test-secret-v66n" satisfied condition "Succeeded or Failed" -Jul 29 16:13:51.432: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-secret-v66n container test-container-subpath-secret-v66n: -STEP: delete the pod 07/29/23 16:13:51.453 -Jul 29 16:13:51.480: INFO: Waiting for pod pod-subpath-test-secret-v66n to disappear -Jul 29 16:13:51.486: INFO: Pod pod-subpath-test-secret-v66n no longer exists -STEP: Deleting pod pod-subpath-test-secret-v66n 07/29/23 16:13:51.486 -Jul 29 16:13:51.486: INFO: Deleting pod "pod-subpath-test-secret-v66n" in namespace "subpath-8690" -[AfterEach] [sig-storage] Subpath +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should be able to change the type from ClusterIP to ExternalName [Conformance] + test/e2e/network/service.go:1515 +STEP: creating a service clusterip-service with the type=ClusterIP in namespace services-1080 08/24/23 12:13:01.913 +STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service 08/24/23 12:13:01.933 +STEP: creating service externalsvc in namespace services-1080 08/24/23 12:13:01.933 +STEP: creating replication controller externalsvc in namespace services-1080 08/24/23 12:13:01.962 +I0824 12:13:01.978980 14 runners.go:193] Created replication controller with name: externalsvc, namespace: services-1080, replica count: 2 +I0824 12:13:05.030625 14 runners.go:193] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +STEP: changing the ClusterIP service to type=ExternalName 08/24/23 12:13:05.039 +Aug 24 12:13:05.064: INFO: Creating new exec pod +Aug 24 12:13:05.088: INFO: Waiting up to 5m0s for pod "execpodws98s" in namespace "services-1080" to be "running" +Aug 24 12:13:05.097: INFO: Pod "execpodws98s": Phase="Pending", Reason="", readiness=false. Elapsed: 8.673836ms +Aug 24 12:13:07.108: INFO: Pod "execpodws98s": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.019625278s +Aug 24 12:13:07.108: INFO: Pod "execpodws98s" satisfied condition "running" +Aug 24 12:13:07.108: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1080 exec execpodws98s -- /bin/sh -x -c nslookup clusterip-service.services-1080.svc.cluster.local' +Aug 24 12:13:07.568: INFO: stderr: "+ nslookup clusterip-service.services-1080.svc.cluster.local\n" +Aug 24 12:13:07.568: INFO: stdout: "Server:\t\t10.233.0.10\nAddress:\t10.233.0.10#53\n\nclusterip-service.services-1080.svc.cluster.local\tcanonical name = externalsvc.services-1080.svc.cluster.local.\nName:\texternalsvc.services-1080.svc.cluster.local\nAddress: 10.233.15.56\n\n" +STEP: deleting ReplicationController externalsvc in namespace services-1080, will wait for the garbage collector to delete the pods 08/24/23 12:13:07.568 +Aug 24 12:13:07.638: INFO: Deleting ReplicationController externalsvc took: 13.236209ms +Aug 24 12:13:07.741: INFO: Terminating ReplicationController externalsvc pods took: 102.779854ms +Aug 24 12:13:09.482: INFO: Cleaning up the ClusterIP to ExternalName test service +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 16:13:51.493: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Subpath +Aug 24 12:13:09.511: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "subpath-8690" for this suite. 07/29/23 16:13:51.503 +STEP: Destroying namespace "services-1080" for this suite. 
08/24/23 12:13:09.523 ------------------------------ -• [SLOW TEST] [24.180 seconds] -[sig-storage] Subpath -test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - test/e2e/storage/subpath.go:36 - should support subpaths with secret pod [Conformance] - test/e2e/storage/subpath.go:60 +• [SLOW TEST] [7.678 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should be able to change the type from ClusterIP to ExternalName [Conformance] + test/e2e/network/service.go:1515 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Subpath + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:13:27.337 - Jul 29 16:13:27.338: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename subpath 07/29/23 16:13:27.339 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:13:27.374 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:13:27.379 - [BeforeEach] [sig-storage] Subpath + STEP: Creating a kubernetes client 08/24/23 12:13:01.859 + Aug 24 12:13:01.860: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 12:13:01.864 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:01.902 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:01.907 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 - STEP: Setting up data 07/29/23 16:13:27.383 - [It] should support subpaths with secret pod [Conformance] - test/e2e/storage/subpath.go:60 - STEP: Creating pod pod-subpath-test-secret-v66n 07/29/23 16:13:27.397 - STEP: Creating a pod to test atomic-volume-subpath 07/29/23 16:13:27.398 - Jul 29 16:13:27.414: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-v66n" in namespace "subpath-8690" to be "Succeeded or Failed" - Jul 29 16:13:27.419: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Pending", Reason="", readiness=false. Elapsed: 5.039046ms - Jul 29 16:13:29.426: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 2.0118533s - Jul 29 16:13:31.428: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 4.013657988s - Jul 29 16:13:33.431: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 6.016887571s - Jul 29 16:13:35.428: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 8.013913318s - Jul 29 16:13:37.427: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 10.012957798s - Jul 29 16:13:39.428: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 12.013824444s - Jul 29 16:13:41.427: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 14.012890811s - Jul 29 16:13:43.427: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 16.012737681s - Jul 29 16:13:45.428: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. Elapsed: 18.014346318s - Jul 29 16:13:47.426: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=true. 
Elapsed: 20.012579894s - Jul 29 16:13:49.425: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Running", Reason="", readiness=false. Elapsed: 22.010597687s - Jul 29 16:13:51.427: INFO: Pod "pod-subpath-test-secret-v66n": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.012988721s - STEP: Saw pod success 07/29/23 16:13:51.427 - Jul 29 16:13:51.427: INFO: Pod "pod-subpath-test-secret-v66n" satisfied condition "Succeeded or Failed" - Jul 29 16:13:51.432: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-secret-v66n container test-container-subpath-secret-v66n: - STEP: delete the pod 07/29/23 16:13:51.453 - Jul 29 16:13:51.480: INFO: Waiting for pod pod-subpath-test-secret-v66n to disappear - Jul 29 16:13:51.486: INFO: Pod pod-subpath-test-secret-v66n no longer exists - STEP: Deleting pod pod-subpath-test-secret-v66n 07/29/23 16:13:51.486 - Jul 29 16:13:51.486: INFO: Deleting pod "pod-subpath-test-secret-v66n" in namespace "subpath-8690" - [AfterEach] [sig-storage] Subpath + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should be able to change the type from ClusterIP to ExternalName [Conformance] + test/e2e/network/service.go:1515 + STEP: creating a service clusterip-service with the type=ClusterIP in namespace services-1080 08/24/23 12:13:01.913 + STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service 08/24/23 12:13:01.933 + STEP: creating service externalsvc in namespace services-1080 08/24/23 12:13:01.933 + STEP: creating replication controller externalsvc in namespace services-1080 08/24/23 12:13:01.962 + I0824 12:13:01.978980 14 runners.go:193] Created replication controller with name: externalsvc, namespace: services-1080, replica count: 2 + I0824 12:13:05.030625 14 runners.go:193] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + STEP: changing the ClusterIP service to type=ExternalName 08/24/23 12:13:05.039 + Aug 24 12:13:05.064: INFO: Creating new exec pod + Aug 24 12:13:05.088: INFO: Waiting up to 5m0s for pod "execpodws98s" in namespace "services-1080" to be "running" + Aug 24 12:13:05.097: INFO: Pod "execpodws98s": Phase="Pending", Reason="", readiness=false. Elapsed: 8.673836ms + Aug 24 12:13:07.108: INFO: Pod "execpodws98s": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.019625278s + Aug 24 12:13:07.108: INFO: Pod "execpodws98s" satisfied condition "running" + Aug 24 12:13:07.108: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1080 exec execpodws98s -- /bin/sh -x -c nslookup clusterip-service.services-1080.svc.cluster.local' + Aug 24 12:13:07.568: INFO: stderr: "+ nslookup clusterip-service.services-1080.svc.cluster.local\n" + Aug 24 12:13:07.568: INFO: stdout: "Server:\t\t10.233.0.10\nAddress:\t10.233.0.10#53\n\nclusterip-service.services-1080.svc.cluster.local\tcanonical name = externalsvc.services-1080.svc.cluster.local.\nName:\texternalsvc.services-1080.svc.cluster.local\nAddress: 10.233.15.56\n\n" + STEP: deleting ReplicationController externalsvc in namespace services-1080, will wait for the garbage collector to delete the pods 08/24/23 12:13:07.568 + Aug 24 12:13:07.638: INFO: Deleting ReplicationController externalsvc took: 13.236209ms + Aug 24 12:13:07.741: INFO: Terminating ReplicationController externalsvc pods took: 102.779854ms + Aug 24 12:13:09.482: INFO: Cleaning up the ClusterIP to ExternalName test service + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 16:13:51.493: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Subpath + Aug 24 12:13:09.511: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "subpath-8690" for this suite. 07/29/23 16:13:51.503 + STEP: Destroying namespace "services-1080" for this suite. 
08/24/23 12:13:09.523 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Update Demo - should create and stop a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:339 -[BeforeEach] [sig-cli] Kubectl client +[sig-storage] ConfigMap + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:423 +[BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:13:51.525 -Jul 29 16:13:51.526: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:13:51.529 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:13:51.564 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:13:51.571 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 12:13:09.541 +Aug 24 12:13:09.542: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 12:13:09.548 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:09.579 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:09.587 +[BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[BeforeEach] Update Demo - test/e2e/kubectl/kubectl.go:326 -[It] should create and stop a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:339 -STEP: creating a replication controller 07/29/23 16:13:51.578 -Jul 29 16:13:51.578: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 create -f -' -Jul 29 16:13:52.295: INFO: stderr: "" -Jul 29 16:13:52.295: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" -STEP: waiting for all containers in name=update-demo pods to come up. 07/29/23 16:13:52.295 -Jul 29 16:13:52.295: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Jul 29 16:13:52.498: INFO: stderr: "" -Jul 29 16:13:52.498: INFO: stdout: "update-demo-nautilus-fz5gh update-demo-nautilus-tmxdg " -Jul 29 16:13:52.499: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:13:52.666: INFO: stderr: "" -Jul 29 16:13:52.666: INFO: stdout: "" -Jul 29 16:13:52.666: INFO: update-demo-nautilus-fz5gh is created but not running -Jul 29 16:13:57.668: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Jul 29 16:13:57.855: INFO: stderr: "" -Jul 29 16:13:57.855: INFO: stdout: "update-demo-nautilus-fz5gh update-demo-nautilus-tmxdg " -Jul 29 16:13:57.855: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . 
"status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:13:57.991: INFO: stderr: "" -Jul 29 16:13:57.991: INFO: stdout: "" -Jul 29 16:13:57.991: INFO: update-demo-nautilus-fz5gh is created but not running -Jul 29 16:14:02.993: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Jul 29 16:14:03.143: INFO: stderr: "" -Jul 29 16:14:03.143: INFO: stdout: "update-demo-nautilus-fz5gh update-demo-nautilus-tmxdg " -Jul 29 16:14:03.144: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:14:03.313: INFO: stderr: "" -Jul 29 16:14:03.313: INFO: stdout: "" -Jul 29 16:14:03.313: INFO: update-demo-nautilus-fz5gh is created but not running -Jul 29 16:14:08.314: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Jul 29 16:14:08.492: INFO: stderr: "" -Jul 29 16:14:08.492: INFO: stdout: "update-demo-nautilus-fz5gh update-demo-nautilus-tmxdg " -Jul 29 16:14:08.493: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:14:08.641: INFO: stderr: "" -Jul 29 16:14:08.641: INFO: stdout: "true" -Jul 29 16:14:08.641: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Jul 29 16:14:08.784: INFO: stderr: "" -Jul 29 16:14:08.784: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" -Jul 29 16:14:08.784: INFO: validating pod update-demo-nautilus-fz5gh -Jul 29 16:14:08.807: INFO: got data: { - "image": "nautilus.jpg" -} - -Jul 29 16:14:08.808: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Jul 29 16:14:08.808: INFO: update-demo-nautilus-fz5gh is verified up and running -Jul 29 16:14:08.808: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-tmxdg -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:14:08.944: INFO: stderr: "" -Jul 29 16:14:08.944: INFO: stdout: "true" -Jul 29 16:14:08.944: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-tmxdg -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Jul 29 16:14:09.099: INFO: stderr: "" -Jul 29 16:14:09.099: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" -Jul 29 16:14:09.099: INFO: validating pod update-demo-nautilus-tmxdg -Jul 29 16:14:09.113: INFO: got data: { - "image": "nautilus.jpg" -} - -Jul 29 16:14:09.113: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Jul 29 16:14:09.113: INFO: update-demo-nautilus-tmxdg is verified up and running -STEP: using delete to clean up resources 07/29/23 16:14:09.113 -Jul 29 16:14:09.114: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 delete --grace-period=0 --force -f -' -Jul 29 16:14:09.264: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" -Jul 29 16:14:09.264: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" -Jul 29 16:14:09.264: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get rc,svc -l name=update-demo --no-headers' -Jul 29 16:14:09.464: INFO: stderr: "No resources found in kubectl-9889 namespace.\n" -Jul 29 16:14:09.464: INFO: stdout: "" -Jul 29 16:14:09.465: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' -Jul 29 16:14:09.644: INFO: stderr: "" -Jul 29 16:14:09.645: INFO: stdout: "" -[AfterEach] [sig-cli] Kubectl client +[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:423 +STEP: Creating configMap with name configmap-test-volume-b6c32e62-a20e-4daa-9c7c-64ad10551867 08/24/23 12:13:09.591 +STEP: Creating a pod to test consume configMaps 08/24/23 12:13:09.601 +Aug 24 12:13:09.622: INFO: Waiting up to 5m0s for pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde" in namespace "configmap-1119" to be "Succeeded or Failed" +Aug 24 12:13:09.635: INFO: Pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde": Phase="Pending", Reason="", readiness=false. Elapsed: 13.038741ms +Aug 24 12:13:11.644: INFO: Pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022037673s +Aug 24 12:13:13.644: INFO: Pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.022357106s +STEP: Saw pod success 08/24/23 12:13:13.644 +Aug 24 12:13:13.645: INFO: Pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde" satisfied condition "Succeeded or Failed" +Aug 24 12:13:13.652: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde container configmap-volume-test: +STEP: delete the pod 08/24/23 12:13:13.683 +Aug 24 12:13:13.704: INFO: Waiting for pod pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde to disappear +Aug 24 12:13:13.709: INFO: Pod pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde no longer exists +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:09.645: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 12:13:13.709: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-9889" for this suite. 07/29/23 16:14:09.656 +STEP: Destroying namespace "configmap-1119" for this suite. 08/24/23 12:13:13.717 ------------------------------ -• [SLOW TEST] [18.148 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Update Demo - test/e2e/kubectl/kubectl.go:324 - should create and stop a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:339 +• [4.189 seconds] +[sig-storage] ConfigMap +test/e2e/common/storage/framework.go:23 + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:423 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:13:51.525 - Jul 29 16:13:51.526: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 16:13:51.529 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:13:51.564 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:13:51.571 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 12:13:09.541 + Aug 24 12:13:09.542: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 12:13:09.548 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:09.579 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:09.587 + [BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [BeforeEach] Update Demo - test/e2e/kubectl/kubectl.go:326 - [It] should create and stop a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:339 - STEP: creating a replication controller 07/29/23 16:13:51.578 - Jul 29 16:13:51.578: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 create -f -' - Jul 29 16:13:52.295: INFO: stderr: "" - Jul 29 16:13:52.295: INFO: stdout: "replicationcontroller/update-demo-nautilus 
created\n" - STEP: waiting for all containers in name=update-demo pods to come up. 07/29/23 16:13:52.295 - Jul 29 16:13:52.295: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' - Jul 29 16:13:52.498: INFO: stderr: "" - Jul 29 16:13:52.498: INFO: stdout: "update-demo-nautilus-fz5gh update-demo-nautilus-tmxdg " - Jul 29 16:13:52.499: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' - Jul 29 16:13:52.666: INFO: stderr: "" - Jul 29 16:13:52.666: INFO: stdout: "" - Jul 29 16:13:52.666: INFO: update-demo-nautilus-fz5gh is created but not running - Jul 29 16:13:57.668: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' - Jul 29 16:13:57.855: INFO: stderr: "" - Jul 29 16:13:57.855: INFO: stdout: "update-demo-nautilus-fz5gh update-demo-nautilus-tmxdg " - Jul 29 16:13:57.855: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' - Jul 29 16:13:57.991: INFO: stderr: "" - Jul 29 16:13:57.991: INFO: stdout: "" - Jul 29 16:13:57.991: INFO: update-demo-nautilus-fz5gh is created but not running - Jul 29 16:14:02.993: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' - Jul 29 16:14:03.143: INFO: stderr: "" - Jul 29 16:14:03.143: INFO: stdout: "update-demo-nautilus-fz5gh update-demo-nautilus-tmxdg " - Jul 29 16:14:03.144: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' - Jul 29 16:14:03.313: INFO: stderr: "" - Jul 29 16:14:03.313: INFO: stdout: "" - Jul 29 16:14:03.313: INFO: update-demo-nautilus-fz5gh is created but not running - Jul 29 16:14:08.314: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' - Jul 29 16:14:08.492: INFO: stderr: "" - Jul 29 16:14:08.492: INFO: stdout: "update-demo-nautilus-fz5gh update-demo-nautilus-tmxdg " - Jul 29 16:14:08.493: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' - Jul 29 16:14:08.641: INFO: stderr: "" - Jul 29 16:14:08.641: INFO: stdout: "true" - Jul 29 16:14:08.641: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-fz5gh -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' - Jul 29 16:14:08.784: INFO: stderr: "" - Jul 29 16:14:08.784: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" - Jul 29 16:14:08.784: INFO: validating pod update-demo-nautilus-fz5gh - Jul 29 16:14:08.807: INFO: got data: { - "image": "nautilus.jpg" - } - - Jul 29 16:14:08.808: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . - Jul 29 16:14:08.808: INFO: update-demo-nautilus-fz5gh is verified up and running - Jul 29 16:14:08.808: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-tmxdg -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' - Jul 29 16:14:08.944: INFO: stderr: "" - Jul 29 16:14:08.944: INFO: stdout: "true" - Jul 29 16:14:08.944: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods update-demo-nautilus-tmxdg -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' - Jul 29 16:14:09.099: INFO: stderr: "" - Jul 29 16:14:09.099: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" - Jul 29 16:14:09.099: INFO: validating pod update-demo-nautilus-tmxdg - Jul 29 16:14:09.113: INFO: got data: { - "image": "nautilus.jpg" - } - - Jul 29 16:14:09.113: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . - Jul 29 16:14:09.113: INFO: update-demo-nautilus-tmxdg is verified up and running - STEP: using delete to clean up resources 07/29/23 16:14:09.113 - Jul 29 16:14:09.114: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 delete --grace-period=0 --force -f -' - Jul 29 16:14:09.264: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" - Jul 29 16:14:09.264: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" - Jul 29 16:14:09.264: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get rc,svc -l name=update-demo --no-headers' - Jul 29 16:14:09.464: INFO: stderr: "No resources found in kubectl-9889 namespace.\n" - Jul 29 16:14:09.464: INFO: stdout: "" - Jul 29 16:14:09.465: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-9889 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' - Jul 29 16:14:09.644: INFO: stderr: "" - Jul 29 16:14:09.645: INFO: stdout: "" - [AfterEach] [sig-cli] Kubectl client + [It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:423 + STEP: Creating configMap with name configmap-test-volume-b6c32e62-a20e-4daa-9c7c-64ad10551867 08/24/23 12:13:09.591 + STEP: Creating a pod to test consume configMaps 08/24/23 12:13:09.601 + Aug 24 12:13:09.622: INFO: Waiting up to 5m0s for pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde" in namespace "configmap-1119" to be "Succeeded or Failed" + Aug 24 12:13:09.635: INFO: Pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde": Phase="Pending", Reason="", readiness=false. Elapsed: 13.038741ms + Aug 24 12:13:11.644: INFO: Pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022037673s + Aug 24 12:13:13.644: INFO: Pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022357106s + STEP: Saw pod success 08/24/23 12:13:13.644 + Aug 24 12:13:13.645: INFO: Pod "pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde" satisfied condition "Succeeded or Failed" + Aug 24 12:13:13.652: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde container configmap-volume-test: + STEP: delete the pod 08/24/23 12:13:13.683 + Aug 24 12:13:13.704: INFO: Waiting for pod pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde to disappear + Aug 24 12:13:13.709: INFO: Pod pod-configmaps-665919d4-536d-46cd-bb92-107a64b92bde no longer exists + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:09.645: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 12:13:13.709: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-9889" for this suite. 07/29/23 16:14:09.656 + STEP: Destroying namespace "configmap-1119" for this suite. 
08/24/23 12:13:13.717 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should provide container's memory request [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:235 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-node] Secrets + should be consumable from pods in env vars [NodeConformance] [Conformance] + test/e2e/common/node/secrets.go:46 +[BeforeEach] [sig-node] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:09.675 -Jul 29 16:14:09.675: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:14:09.681 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:09.713 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:09.72 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 12:13:13.733 +Aug 24 12:13:13.733: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 12:13:13.735 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:13.762 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:13.766 +[BeforeEach] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should provide container's memory request [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:235 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:14:09.726 -Jul 29 16:14:09.748: INFO: Waiting up to 5m0s for pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a" in namespace "projected-1207" to be "Succeeded or Failed" -Jul 29 16:14:09.754: INFO: Pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a": Phase="Pending", Reason="", readiness=false. Elapsed: 5.884895ms -Jul 29 16:14:11.767: INFO: Pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018781311s -Jul 29 16:14:13.772: INFO: Pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02444385s -STEP: Saw pod success 07/29/23 16:14:13.772 -Jul 29 16:14:13.773: INFO: Pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a" satisfied condition "Succeeded or Failed" -Jul 29 16:14:13.785: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a container client-container: -STEP: delete the pod 07/29/23 16:14:13.806 -Jul 29 16:14:13.832: INFO: Waiting for pod downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a to disappear -Jul 29 16:14:13.838: INFO: Pod downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +[It] should be consumable from pods in env vars [NodeConformance] [Conformance] + test/e2e/common/node/secrets.go:46 +STEP: Creating secret with name secret-test-31bf844d-597a-454b-9424-ac47410c966f 08/24/23 12:13:13.77 +STEP: Creating a pod to test consume secrets 08/24/23 12:13:13.778 +Aug 24 12:13:13.791: INFO: Waiting up to 5m0s for pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f" in namespace "secrets-4426" to be "Succeeded or Failed" +Aug 24 12:13:13.798: INFO: Pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f": Phase="Pending", Reason="", readiness=false. Elapsed: 7.9004ms +Aug 24 12:13:15.809: INFO: Pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018114195s +Aug 24 12:13:17.811: INFO: Pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020905295s +STEP: Saw pod success 08/24/23 12:13:17.812 +Aug 24 12:13:17.812: INFO: Pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f" satisfied condition "Succeeded or Failed" +Aug 24 12:13:17.822: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f container secret-env-test: +STEP: delete the pod 08/24/23 12:13:17.842 +Aug 24 12:13:17.870: INFO: Waiting for pod pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f to disappear +Aug 24 12:13:17.875: INFO: Pod pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f no longer exists +[AfterEach] [sig-node] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:13.838: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 12:13:17.875: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-node] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-node] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "projected-1207" for this suite. 07/29/23 16:14:13.846 +STEP: Destroying namespace "secrets-4426" for this suite. 
08/24/23 12:13:17.884 ------------------------------ -• [4.182 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should provide container's memory request [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:235 +• [4.166 seconds] +[sig-node] Secrets +test/e2e/common/node/framework.go:23 + should be consumable from pods in env vars [NodeConformance] [Conformance] + test/e2e/common/node/secrets.go:46 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-node] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:09.675 - Jul 29 16:14:09.675: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:14:09.681 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:09.713 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:09.72 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 12:13:13.733 + Aug 24 12:13:13.733: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 12:13:13.735 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:13.762 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:13.766 + [BeforeEach] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should provide container's memory request [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:235 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:14:09.726 - Jul 29 16:14:09.748: INFO: Waiting up to 5m0s for pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a" in namespace "projected-1207" to be "Succeeded or Failed" - Jul 29 16:14:09.754: INFO: Pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a": Phase="Pending", Reason="", readiness=false. Elapsed: 5.884895ms - Jul 29 16:14:11.767: INFO: Pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018781311s - Jul 29 16:14:13.772: INFO: Pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02444385s - STEP: Saw pod success 07/29/23 16:14:13.772 - Jul 29 16:14:13.773: INFO: Pod "downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a" satisfied condition "Succeeded or Failed" - Jul 29 16:14:13.785: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a container client-container: - STEP: delete the pod 07/29/23 16:14:13.806 - Jul 29 16:14:13.832: INFO: Waiting for pod downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a to disappear - Jul 29 16:14:13.838: INFO: Pod downwardapi-volume-74341275-4b9b-42c5-97d2-0a856f9d688a no longer exists - [AfterEach] [sig-storage] Projected downwardAPI + [It] should be consumable from pods in env vars [NodeConformance] [Conformance] + test/e2e/common/node/secrets.go:46 + STEP: Creating secret with name secret-test-31bf844d-597a-454b-9424-ac47410c966f 08/24/23 12:13:13.77 + STEP: Creating a pod to test consume secrets 08/24/23 12:13:13.778 + Aug 24 12:13:13.791: INFO: Waiting up to 5m0s for pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f" in namespace "secrets-4426" to be "Succeeded or Failed" + Aug 24 12:13:13.798: INFO: Pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f": Phase="Pending", Reason="", readiness=false. Elapsed: 7.9004ms + Aug 24 12:13:15.809: INFO: Pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018114195s + Aug 24 12:13:17.811: INFO: Pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020905295s + STEP: Saw pod success 08/24/23 12:13:17.812 + Aug 24 12:13:17.812: INFO: Pod "pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f" satisfied condition "Succeeded or Failed" + Aug 24 12:13:17.822: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f container secret-env-test: + STEP: delete the pod 08/24/23 12:13:17.842 + Aug 24 12:13:17.870: INFO: Waiting for pod pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f to disappear + Aug 24 12:13:17.875: INFO: Pod pod-secrets-d3af6ef2-c97c-4f52-afef-da0fc1ff5d9f no longer exists + [AfterEach] [sig-node] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:13.838: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 12:13:17.875: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-node] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-node] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "projected-1207" for this suite. 07/29/23 16:14:13.846 + STEP: Destroying namespace "secrets-4426" for this suite. 
08/24/23 12:13:17.884 << End Captured GinkgoWriter Output ------------------------------ SSSSSSS ------------------------------ -[sig-api-machinery] Garbage collector - should not be blocked by dependency circle [Conformance] - test/e2e/apimachinery/garbage_collector.go:849 -[BeforeEach] [sig-api-machinery] Garbage collector +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD without validation schema [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:153 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:13.859 -Jul 29 16:14:13.859: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename gc 07/29/23 16:14:13.862 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:13.89 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:13.897 -[BeforeEach] [sig-api-machinery] Garbage collector +STEP: Creating a kubernetes client 08/24/23 12:13:17.903 +Aug 24 12:13:17.904: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:13:17.906 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:17.936 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:17.946 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should not be blocked by dependency circle [Conformance] - test/e2e/apimachinery/garbage_collector.go:849 -Jul 29 16:14:13.960: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"82f1c877-7b60-4eec-bd0d-d463359a6f99", Controller:(*bool)(0xc003930fb6), BlockOwnerDeletion:(*bool)(0xc003930fb7)}} -Jul 29 16:14:13.980: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"2ecd862f-eeb8-4dad-8ce0-39785c5e8694", Controller:(*bool)(0xc0039311de), BlockOwnerDeletion:(*bool)(0xc0039311df)}} -Jul 29 16:14:13.991: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"c4b96b97-f47b-473c-aa33-d29a6ffee668", Controller:(*bool)(0xc003931416), BlockOwnerDeletion:(*bool)(0xc003931417)}} -[AfterEach] [sig-api-machinery] Garbage collector +[It] works for CRD without validation schema [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:153 +Aug 24 12:13:17.952: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 08/24/23 12:13:20.633 +Aug 24 12:13:20.633: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 --namespace=crd-publish-openapi-1046 create -f -' +Aug 24 12:13:22.056: INFO: stderr: "" +Aug 24 12:13:22.056: INFO: stdout: "e2e-test-crd-publish-openapi-9137-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" +Aug 24 12:13:22.057: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 --namespace=crd-publish-openapi-1046 delete e2e-test-crd-publish-openapi-9137-crds test-cr' +Aug 24 12:13:22.235: INFO: stderr: "" +Aug 24 12:13:22.235: INFO: stdout: 
"e2e-test-crd-publish-openapi-9137-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" +Aug 24 12:13:22.236: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 --namespace=crd-publish-openapi-1046 apply -f -' +Aug 24 12:13:22.675: INFO: stderr: "" +Aug 24 12:13:22.675: INFO: stdout: "e2e-test-crd-publish-openapi-9137-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" +Aug 24 12:13:22.675: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 --namespace=crd-publish-openapi-1046 delete e2e-test-crd-publish-openapi-9137-crds test-cr' +Aug 24 12:13:22.814: INFO: stderr: "" +Aug 24 12:13:22.814: INFO: stdout: "e2e-test-crd-publish-openapi-9137-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" +STEP: kubectl explain works to explain CR without validation schema 08/24/23 12:13:22.814 +Aug 24 12:13:22.814: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 explain e2e-test-crd-publish-openapi-9137-crds' +Aug 24 12:13:24.256: INFO: stderr: "" +Aug 24 12:13:24.256: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-9137-crd\nVERSION: crd-publish-openapi-test-empty.example.com/v1\n\nDESCRIPTION:\n \n" +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:19.018: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +Aug 24 12:13:26.802: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "gc-3895" for this suite. 07/29/23 16:14:19.031 +STEP: Destroying namespace "crd-publish-openapi-1046" for this suite. 
08/24/23 12:13:26.819 ------------------------------ -• [SLOW TEST] [5.183 seconds] -[sig-api-machinery] Garbage collector +• [SLOW TEST] [8.927 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/apimachinery/framework.go:23 - should not be blocked by dependency circle [Conformance] - test/e2e/apimachinery/garbage_collector.go:849 + works for CRD without validation schema [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:153 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Garbage collector + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:13.859 - Jul 29 16:14:13.859: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename gc 07/29/23 16:14:13.862 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:13.89 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:13.897 - [BeforeEach] [sig-api-machinery] Garbage collector + STEP: Creating a kubernetes client 08/24/23 12:13:17.903 + Aug 24 12:13:17.904: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:13:17.906 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:17.936 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:17.946 + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should not be blocked by dependency circle [Conformance] - test/e2e/apimachinery/garbage_collector.go:849 - Jul 29 16:14:13.960: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"82f1c877-7b60-4eec-bd0d-d463359a6f99", Controller:(*bool)(0xc003930fb6), BlockOwnerDeletion:(*bool)(0xc003930fb7)}} - Jul 29 16:14:13.980: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"2ecd862f-eeb8-4dad-8ce0-39785c5e8694", Controller:(*bool)(0xc0039311de), BlockOwnerDeletion:(*bool)(0xc0039311df)}} - Jul 29 16:14:13.991: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"c4b96b97-f47b-473c-aa33-d29a6ffee668", Controller:(*bool)(0xc003931416), BlockOwnerDeletion:(*bool)(0xc003931417)}} - [AfterEach] [sig-api-machinery] Garbage collector + [It] works for CRD without validation schema [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:153 + Aug 24 12:13:17.952: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 08/24/23 12:13:20.633 + Aug 24 12:13:20.633: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 --namespace=crd-publish-openapi-1046 create -f -' + Aug 24 12:13:22.056: INFO: stderr: "" + Aug 24 12:13:22.056: INFO: stdout: "e2e-test-crd-publish-openapi-9137-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" + Aug 24 12:13:22.057: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 --namespace=crd-publish-openapi-1046 delete e2e-test-crd-publish-openapi-9137-crds test-cr' + Aug 24 
12:13:22.235: INFO: stderr: "" + Aug 24 12:13:22.235: INFO: stdout: "e2e-test-crd-publish-openapi-9137-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" + Aug 24 12:13:22.236: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 --namespace=crd-publish-openapi-1046 apply -f -' + Aug 24 12:13:22.675: INFO: stderr: "" + Aug 24 12:13:22.675: INFO: stdout: "e2e-test-crd-publish-openapi-9137-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" + Aug 24 12:13:22.675: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 --namespace=crd-publish-openapi-1046 delete e2e-test-crd-publish-openapi-9137-crds test-cr' + Aug 24 12:13:22.814: INFO: stderr: "" + Aug 24 12:13:22.814: INFO: stdout: "e2e-test-crd-publish-openapi-9137-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" + STEP: kubectl explain works to explain CR without validation schema 08/24/23 12:13:22.814 + Aug 24 12:13:22.814: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-1046 explain e2e-test-crd-publish-openapi-9137-crds' + Aug 24 12:13:24.256: INFO: stderr: "" + Aug 24 12:13:24.256: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-9137-crd\nVERSION: crd-publish-openapi-test-empty.example.com/v1\n\nDESCRIPTION:\n \n" + [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:19.018: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + Aug 24 12:13:26.802: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "gc-3895" for this suite. 07/29/23 16:14:19.031 + STEP: Destroying namespace "crd-publish-openapi-1046" for this suite. 
08/24/23 12:13:26.819 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Downward API volume - should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:68 -[BeforeEach] [sig-storage] Downward API volume +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should perform canary updates and phased rolling updates of template modifications [Conformance] + test/e2e/apps/statefulset.go:317 +[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:19.046 -Jul 29 16:14:19.047: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:14:19.048 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:19.081 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:19.086 -[BeforeEach] [sig-storage] Downward API volume +STEP: Creating a kubernetes client 08/24/23 12:13:26.838 +Aug 24 12:13:26.838: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename statefulset 08/24/23 12:13:26.84 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:26.869 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:26.872 +[BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 -[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:68 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:14:19.09 -Jul 29 16:14:19.103: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498" in namespace "downward-api-289" to be "Succeeded or Failed" -Jul 29 16:14:19.109: INFO: Pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498": Phase="Pending", Reason="", readiness=false. Elapsed: 5.731297ms -Jul 29 16:14:21.116: INFO: Pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013602253s -Jul 29 16:14:23.116: INFO: Pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.013179586s -STEP: Saw pod success 07/29/23 16:14:23.116 -Jul 29 16:14:23.117: INFO: Pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498" satisfied condition "Succeeded or Failed" -Jul 29 16:14:23.122: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498 container client-container: -STEP: delete the pod 07/29/23 16:14:23.135 -Jul 29 16:14:23.160: INFO: Waiting for pod downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498 to disappear -Jul 29 16:14:23.164: INFO: Pod downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498 no longer exists -[AfterEach] [sig-storage] Downward API volume +[BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 +STEP: Creating service test in namespace statefulset-3788 08/24/23 12:13:26.876 +[It] should perform canary updates and phased rolling updates of template modifications [Conformance] + test/e2e/apps/statefulset.go:317 +STEP: Creating a new StatefulSet 08/24/23 12:13:26.884 +Aug 24 12:13:26.917: INFO: Found 0 stateful pods, waiting for 3 +Aug 24 12:13:36.930: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 12:13:36.930: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 12:13:36.930: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Updating stateful set template: update image from registry.k8s.io/e2e-test-images/httpd:2.4.38-4 to registry.k8s.io/e2e-test-images/httpd:2.4.39-4 08/24/23 12:13:36.949 +Aug 24 12:13:36.976: INFO: Updating stateful set ss2 +STEP: Creating a new revision 08/24/23 12:13:36.976 +STEP: Not applying an update when the partition is greater than the number of replicas 08/24/23 12:13:47.058 +STEP: Performing a canary update 08/24/23 12:13:47.059 +Aug 24 12:13:47.093: INFO: Updating stateful set ss2 +Aug 24 12:13:47.122: INFO: Waiting for Pod statefulset-3788/ss2-2 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 +STEP: Restoring Pods to the correct revision when they are deleted 08/24/23 12:13:57.138 +Aug 24 12:13:57.214: INFO: Found 1 stateful pods, waiting for 3 +Aug 24 12:14:07.225: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 12:14:07.225: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 12:14:07.225: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Performing a phased rolling update 08/24/23 12:14:07.239 +Aug 24 12:14:07.267: INFO: Updating stateful set ss2 +Aug 24 12:14:07.344: INFO: Waiting for Pod statefulset-3788/ss2-1 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 +Aug 24 12:14:17.393: INFO: Updating stateful set ss2 +Aug 24 12:14:17.412: INFO: Waiting for StatefulSet statefulset-3788/ss2 to complete update +Aug 24 12:14:17.413: INFO: Waiting for Pod statefulset-3788/ss2-0 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 +Aug 24 12:14:27.445: INFO: Deleting all statefulset in ns statefulset-3788 +Aug 24 12:14:27.452: INFO: Scaling statefulset ss2 to 0 +Aug 24 12:14:37.490: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 12:14:37.497: INFO: Deleting statefulset ss2 +[AfterEach] [sig-apps] 
StatefulSet test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:23.164: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume +Aug 24 12:14:37.524: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-289" for this suite. 07/29/23 16:14:23.172 +STEP: Destroying namespace "statefulset-3788" for this suite. 08/24/23 12:14:37.551 ------------------------------ -• [4.135 seconds] -[sig-storage] Downward API volume -test/e2e/common/storage/framework.go:23 - should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:68 +• [SLOW TEST] [70.727 seconds] +[sig-apps] StatefulSet +test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:103 + should perform canary updates and phased rolling updates of template modifications [Conformance] + test/e2e/apps/statefulset.go:317 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume + [BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:19.046 - Jul 29 16:14:19.047: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:14:19.048 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:19.081 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:19.086 - [BeforeEach] [sig-storage] Downward API volume + STEP: Creating a kubernetes client 08/24/23 12:13:26.838 + Aug 24 12:13:26.838: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename statefulset 08/24/23 12:13:26.84 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:13:26.869 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:13:26.872 + [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 - [It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:68 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:14:19.09 - Jul 29 16:14:19.103: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498" in namespace "downward-api-289" to be "Succeeded or Failed" - Jul 29 16:14:19.109: INFO: Pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498": Phase="Pending", Reason="", readiness=false. Elapsed: 5.731297ms - Jul 29 16:14:21.116: INFO: Pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013602253s - Jul 29 16:14:23.116: INFO: Pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.013179586s - STEP: Saw pod success 07/29/23 16:14:23.116 - Jul 29 16:14:23.117: INFO: Pod "downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498" satisfied condition "Succeeded or Failed" - Jul 29 16:14:23.122: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498 container client-container: - STEP: delete the pod 07/29/23 16:14:23.135 - Jul 29 16:14:23.160: INFO: Waiting for pod downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498 to disappear - Jul 29 16:14:23.164: INFO: Pod downwardapi-volume-8ccfd14b-437f-467c-bc13-231a04ec1498 no longer exists - [AfterEach] [sig-storage] Downward API volume + [BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 + [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 + STEP: Creating service test in namespace statefulset-3788 08/24/23 12:13:26.876 + [It] should perform canary updates and phased rolling updates of template modifications [Conformance] + test/e2e/apps/statefulset.go:317 + STEP: Creating a new StatefulSet 08/24/23 12:13:26.884 + Aug 24 12:13:26.917: INFO: Found 0 stateful pods, waiting for 3 + Aug 24 12:13:36.930: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 12:13:36.930: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 12:13:36.930: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true + STEP: Updating stateful set template: update image from registry.k8s.io/e2e-test-images/httpd:2.4.38-4 to registry.k8s.io/e2e-test-images/httpd:2.4.39-4 08/24/23 12:13:36.949 + Aug 24 12:13:36.976: INFO: Updating stateful set ss2 + STEP: Creating a new revision 08/24/23 12:13:36.976 + STEP: Not applying an update when the partition is greater than the number of replicas 08/24/23 12:13:47.058 + STEP: Performing a canary update 08/24/23 12:13:47.059 + Aug 24 12:13:47.093: INFO: Updating stateful set ss2 + Aug 24 12:13:47.122: INFO: Waiting for Pod statefulset-3788/ss2-2 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 + STEP: Restoring Pods to the correct revision when they are deleted 08/24/23 12:13:57.138 + Aug 24 12:13:57.214: INFO: Found 1 stateful pods, waiting for 3 + Aug 24 12:14:07.225: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 12:14:07.225: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 12:14:07.225: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true + STEP: Performing a phased rolling update 08/24/23 12:14:07.239 + Aug 24 12:14:07.267: INFO: Updating stateful set ss2 + Aug 24 12:14:07.344: INFO: Waiting for Pod statefulset-3788/ss2-1 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 + Aug 24 12:14:17.393: INFO: Updating stateful set ss2 + Aug 24 12:14:17.412: INFO: Waiting for StatefulSet statefulset-3788/ss2 to complete update + Aug 24 12:14:17.413: INFO: Waiting for Pod statefulset-3788/ss2-0 to have revision ss2-5459d8585b update revision ss2-7b6c9599d5 + [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 + Aug 24 12:14:27.445: INFO: Deleting all statefulset in ns statefulset-3788 + Aug 24 12:14:27.452: INFO: Scaling statefulset ss2 to 0 + Aug 24 12:14:37.490: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 12:14:37.497: INFO: Deleting 
statefulset ss2 + [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:23.164: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume + Aug 24 12:14:37.524: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-289" for this suite. 07/29/23 16:14:23.172 + STEP: Destroying namespace "statefulset-3788" for this suite. 08/24/23 12:14:37.551 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] Daemon set [Serial] - should run and stop complex daemon [Conformance] - test/e2e/apps/daemon_set.go:205 -[BeforeEach] [sig-apps] Daemon set [Serial] +[sig-storage] EmptyDir volumes + should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:117 +[BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:23.192 -Jul 29 16:14:23.192: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename daemonsets 07/29/23 16:14:23.193 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:23.221 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:23.226 -[BeforeEach] [sig-apps] Daemon set [Serial] +STEP: Creating a kubernetes client 08/24/23 12:14:37.576 +Aug 24 12:14:37.576: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 12:14:37.579 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:14:37.611 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:14:37.615 +[BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 -[It] should run and stop complex daemon [Conformance] - test/e2e/apps/daemon_set.go:205 -Jul 29 16:14:23.258: INFO: Creating daemon "daemon-set" with a node selector -STEP: Initially, daemon pods should not be running on any nodes. 07/29/23 16:14:23.268 -Jul 29 16:14:23.274: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:23.274: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set -STEP: Change node label to blue, check that daemon pod is launched. 
07/29/23 16:14:23.274 -Jul 29 16:14:23.310: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:23.311: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:14:24.320: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:24.320: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:14:25.322: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:25.323: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:14:26.317: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:26.317: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:14:27.328: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 -Jul 29 16:14:27.328: INFO: Number of running nodes: 1, number of available pods: 1 in daemonset daemon-set -STEP: Update the node label to green, and wait for daemons to be unscheduled 07/29/23 16:14:27.333 -Jul 29 16:14:27.360: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 -Jul 29 16:14:27.360: INFO: Number of running nodes: 0, number of available pods: 1 in daemonset daemon-set -Jul 29 16:14:28.368: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:28.368: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set -STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate 07/29/23 16:14:28.369 -Jul 29 16:14:28.392: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:28.392: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:14:29.400: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:29.400: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:14:30.399: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:30.399: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:14:31.403: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:31.403: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:14:32.402: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 -Jul 29 16:14:32.402: INFO: Number of running nodes: 1, number of available pods: 1 in daemonset daemon-set -[AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 -STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:14:32.412 -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-92, will wait for the garbage collector to delete the pods 07/29/23 16:14:32.413 -Jul 29 16:14:32.482: INFO: Deleting DaemonSet.extensions daemon-set took: 13.012945ms -Jul 29 16:14:32.582: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.323948ms -Jul 29 16:14:34.693: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:14:34.693: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set -Jul 29 16:14:34.699: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"20453"},"items":null} - -Jul 29 16:14:34.705: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"20453"},"items":null} - -[AfterEach] [sig-apps] Daemon set 
[Serial] +[It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:117 +STEP: Creating a pod to test emptydir 0777 on tmpfs 08/24/23 12:14:37.621 +Aug 24 12:14:37.638: INFO: Waiting up to 5m0s for pod "pod-edca0feb-4034-404a-a542-9f105df0d971" in namespace "emptydir-3176" to be "Succeeded or Failed" +Aug 24 12:14:37.650: INFO: Pod "pod-edca0feb-4034-404a-a542-9f105df0d971": Phase="Pending", Reason="", readiness=false. Elapsed: 11.8767ms +Aug 24 12:14:39.660: INFO: Pod "pod-edca0feb-4034-404a-a542-9f105df0d971": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022002383s +Aug 24 12:14:41.662: INFO: Pod "pod-edca0feb-4034-404a-a542-9f105df0d971": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024074548s +STEP: Saw pod success 08/24/23 12:14:41.662 +Aug 24 12:14:41.663: INFO: Pod "pod-edca0feb-4034-404a-a542-9f105df0d971" satisfied condition "Succeeded or Failed" +Aug 24 12:14:41.678: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-edca0feb-4034-404a-a542-9f105df0d971 container test-container: +STEP: delete the pod 08/24/23 12:14:41.717 +Aug 24 12:14:41.758: INFO: Waiting for pod pod-edca0feb-4034-404a-a542-9f105df0d971 to disappear +Aug 24 12:14:41.765: INFO: Pod pod-edca0feb-4034-404a-a542-9f105df0d971 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:34.752: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] +Aug 24 12:14:41.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "daemonsets-92" for this suite. 07/29/23 16:14:34.769 +STEP: Destroying namespace "emptydir-3176" for this suite. 
08/24/23 12:14:41.776 ------------------------------ -• [SLOW TEST] [11.596 seconds] -[sig-apps] Daemon set [Serial] -test/e2e/apps/framework.go:23 - should run and stop complex daemon [Conformance] - test/e2e/apps/daemon_set.go:205 +• [4.214 seconds] +[sig-storage] EmptyDir volumes +test/e2e/common/storage/framework.go:23 + should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:117 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Daemon set [Serial] + [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:23.192 - Jul 29 16:14:23.192: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename daemonsets 07/29/23 16:14:23.193 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:23.221 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:23.226 - [BeforeEach] [sig-apps] Daemon set [Serial] + STEP: Creating a kubernetes client 08/24/23 12:14:37.576 + Aug 24 12:14:37.576: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 12:14:37.579 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:14:37.611 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:14:37.615 + [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 - [It] should run and stop complex daemon [Conformance] - test/e2e/apps/daemon_set.go:205 - Jul 29 16:14:23.258: INFO: Creating daemon "daemon-set" with a node selector - STEP: Initially, daemon pods should not be running on any nodes. 07/29/23 16:14:23.268 - Jul 29 16:14:23.274: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:23.274: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set - STEP: Change node label to blue, check that daemon pod is launched. 
07/29/23 16:14:23.274 - Jul 29 16:14:23.310: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:23.311: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:14:24.320: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:24.320: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:14:25.322: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:25.323: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:14:26.317: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:26.317: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:14:27.328: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 - Jul 29 16:14:27.328: INFO: Number of running nodes: 1, number of available pods: 1 in daemonset daemon-set - STEP: Update the node label to green, and wait for daemons to be unscheduled 07/29/23 16:14:27.333 - Jul 29 16:14:27.360: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 - Jul 29 16:14:27.360: INFO: Number of running nodes: 0, number of available pods: 1 in daemonset daemon-set - Jul 29 16:14:28.368: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:28.368: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set - STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate 07/29/23 16:14:28.369 - Jul 29 16:14:28.392: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:28.392: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:14:29.400: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:29.400: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:14:30.399: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:30.399: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:14:31.403: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:31.403: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:14:32.402: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 - Jul 29 16:14:32.402: INFO: Number of running nodes: 1, number of available pods: 1 in daemonset daemon-set - [AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 - STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:14:32.412 - STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-92, will wait for the garbage collector to delete the pods 07/29/23 16:14:32.413 - Jul 29 16:14:32.482: INFO: Deleting DaemonSet.extensions daemon-set took: 13.012945ms - Jul 29 16:14:32.582: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.323948ms - Jul 29 16:14:34.693: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:14:34.693: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set - Jul 29 16:14:34.699: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"20453"},"items":null} - - Jul 29 16:14:34.705: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"20453"},"items":null} - 
- [AfterEach] [sig-apps] Daemon set [Serial] + [It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:117 + STEP: Creating a pod to test emptydir 0777 on tmpfs 08/24/23 12:14:37.621 + Aug 24 12:14:37.638: INFO: Waiting up to 5m0s for pod "pod-edca0feb-4034-404a-a542-9f105df0d971" in namespace "emptydir-3176" to be "Succeeded or Failed" + Aug 24 12:14:37.650: INFO: Pod "pod-edca0feb-4034-404a-a542-9f105df0d971": Phase="Pending", Reason="", readiness=false. Elapsed: 11.8767ms + Aug 24 12:14:39.660: INFO: Pod "pod-edca0feb-4034-404a-a542-9f105df0d971": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022002383s + Aug 24 12:14:41.662: INFO: Pod "pod-edca0feb-4034-404a-a542-9f105df0d971": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024074548s + STEP: Saw pod success 08/24/23 12:14:41.662 + Aug 24 12:14:41.663: INFO: Pod "pod-edca0feb-4034-404a-a542-9f105df0d971" satisfied condition "Succeeded or Failed" + Aug 24 12:14:41.678: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-edca0feb-4034-404a-a542-9f105df0d971 container test-container: + STEP: delete the pod 08/24/23 12:14:41.717 + Aug 24 12:14:41.758: INFO: Waiting for pod pod-edca0feb-4034-404a-a542-9f105df0d971 to disappear + Aug 24 12:14:41.765: INFO: Pod pod-edca0feb-4034-404a-a542-9f105df0d971 no longer exists + [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:34.752: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + Aug 24 12:14:41.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "daemonsets-92" for this suite. 07/29/23 16:14:34.769 + STEP: Destroying namespace "emptydir-3176" for this suite. 
08/24/23 12:14:41.776 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-apps] ReplicationController - should surface a failure condition on a common issue like exceeded quota [Conformance] - test/e2e/apps/rc.go:83 -[BeforeEach] [sig-apps] ReplicationController +[sig-storage] CSIInlineVolumes + should support ephemeral VolumeLifecycleMode in CSIDriver API [Conformance] + test/e2e/storage/csi_inline.go:46 +[BeforeEach] [sig-storage] CSIInlineVolumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:34.793 -Jul 29 16:14:34.794: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replication-controller 07/29/23 16:14:34.796 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:34.84 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:34.847 -[BeforeEach] [sig-apps] ReplicationController +STEP: Creating a kubernetes client 08/24/23 12:14:41.792 +Aug 24 12:14:41.792: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename csiinlinevolumes 08/24/23 12:14:41.796 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:14:41.825 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:14:41.835 +[BeforeEach] [sig-storage] CSIInlineVolumes test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 -[It] should surface a failure condition on a common issue like exceeded quota [Conformance] - test/e2e/apps/rc.go:83 -Jul 29 16:14:34.859: INFO: Creating quota "condition-test" that allows only two pods to run in the current namespace -STEP: Creating rc "condition-test" that asks for more than the allowed pod quota 07/29/23 16:14:35.924 -STEP: Checking rc "condition-test" has the desired failure condition set 07/29/23 16:14:35.943 -STEP: Scaling down rc "condition-test" to satisfy pod quota 07/29/23 16:14:36.962 -Jul 29 16:14:36.988: INFO: Updating replication controller "condition-test" -STEP: Checking rc "condition-test" has no failure condition set 07/29/23 16:14:36.989 -[AfterEach] [sig-apps] ReplicationController +[It] should support ephemeral VolumeLifecycleMode in CSIDriver API [Conformance] + test/e2e/storage/csi_inline.go:46 +STEP: creating 08/24/23 12:14:41.844 +STEP: getting 08/24/23 12:14:41.881 +STEP: listing 08/24/23 12:14:41.892 +STEP: deleting 08/24/23 12:14:41.898 +[AfterEach] [sig-storage] CSIInlineVolumes test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:38.003: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] ReplicationController +Aug 24 12:14:41.936: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] ReplicationController +[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] ReplicationController +[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes tear down framework | framework.go:193 -STEP: Destroying namespace "replication-controller-7805" for this suite. 07/29/23 16:14:38.014 +STEP: Destroying namespace "csiinlinevolumes-6987" for this suite. 
08/24/23 12:14:41.95 ------------------------------ -• [3.233 seconds] -[sig-apps] ReplicationController -test/e2e/apps/framework.go:23 - should surface a failure condition on a common issue like exceeded quota [Conformance] - test/e2e/apps/rc.go:83 +• [0.174 seconds] +[sig-storage] CSIInlineVolumes +test/e2e/storage/utils/framework.go:23 + should support ephemeral VolumeLifecycleMode in CSIDriver API [Conformance] + test/e2e/storage/csi_inline.go:46 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] ReplicationController + [BeforeEach] [sig-storage] CSIInlineVolumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:34.793 - Jul 29 16:14:34.794: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replication-controller 07/29/23 16:14:34.796 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:34.84 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:34.847 - [BeforeEach] [sig-apps] ReplicationController + STEP: Creating a kubernetes client 08/24/23 12:14:41.792 + Aug 24 12:14:41.792: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename csiinlinevolumes 08/24/23 12:14:41.796 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:14:41.825 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:14:41.835 + [BeforeEach] [sig-storage] CSIInlineVolumes test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 - [It] should surface a failure condition on a common issue like exceeded quota [Conformance] - test/e2e/apps/rc.go:83 - Jul 29 16:14:34.859: INFO: Creating quota "condition-test" that allows only two pods to run in the current namespace - STEP: Creating rc "condition-test" that asks for more than the allowed pod quota 07/29/23 16:14:35.924 - STEP: Checking rc "condition-test" has the desired failure condition set 07/29/23 16:14:35.943 - STEP: Scaling down rc "condition-test" to satisfy pod quota 07/29/23 16:14:36.962 - Jul 29 16:14:36.988: INFO: Updating replication controller "condition-test" - STEP: Checking rc "condition-test" has no failure condition set 07/29/23 16:14:36.989 - [AfterEach] [sig-apps] ReplicationController + [It] should support ephemeral VolumeLifecycleMode in CSIDriver API [Conformance] + test/e2e/storage/csi_inline.go:46 + STEP: creating 08/24/23 12:14:41.844 + STEP: getting 08/24/23 12:14:41.881 + STEP: listing 08/24/23 12:14:41.892 + STEP: deleting 08/24/23 12:14:41.898 + [AfterEach] [sig-storage] CSIInlineVolumes test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:38.003: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] ReplicationController + Aug 24 12:14:41.936: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] ReplicationController + [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] ReplicationController + [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes tear down framework | framework.go:193 - STEP: Destroying namespace "replication-controller-7805" for this suite. 07/29/23 16:14:38.014 + STEP: Destroying namespace "csiinlinevolumes-6987" for this suite. 
08/24/23 12:14:41.95 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPredicates [Serial] - validates that NodeSelector is respected if matching [Conformance] - test/e2e/scheduling/predicates.go:466 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] +[sig-storage] Projected configMap + optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:174 +[BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:38.03 -Jul 29 16:14:38.031: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sched-pred 07/29/23 16:14:38.033 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:38.061 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:38.067 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] +STEP: Creating a kubernetes client 08/24/23 12:14:41.97 +Aug 24 12:14:41.970: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:14:41.972 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:14:42.005 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:14:42.01 +[BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] - test/e2e/scheduling/predicates.go:97 -Jul 29 16:14:38.072: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready -Jul 29 16:14:38.087: INFO: Waiting for terminating namespaces to be deleted... 
-Jul 29 16:14:38.095: INFO: -Logging pods the apiserver thinks is on node wetuj3nuajog-1 before test -Jul 29 16:14:38.111: INFO: cilium-cdv47 from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container cilium-agent ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: cilium-node-init-jdrzm from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container node-init ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: coredns-787d4945fb-2xpvx from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container coredns ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: coredns-787d4945fb-clg7z from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container coredns ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: kube-addon-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container kube-addon-manager ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: kube-apiserver-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container kube-apiserver ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: kube-controller-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container kube-controller-manager ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: kube-proxy-zc9m8 from kube-system started at 2023-07-29 15:13:58 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container kube-proxy ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: kube-scheduler-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container kube-scheduler ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:14:38.111: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: Container systemd-logs ready: true, restart count 0 -Jul 29 16:14:38.111: INFO: -Logging pods the apiserver thinks is on node wetuj3nuajog-2 before test -Jul 29 16:14:38.126: INFO: cilium-kxphw from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.126: INFO: Container cilium-agent ready: true, restart count 0 -Jul 29 16:14:38.126: INFO: cilium-node-init-fqx5t from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.126: INFO: Container node-init ready: true, restart count 0 -Jul 29 16:14:38.126: INFO: cilium-operator-8c499d9f6-hfgjd from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.127: INFO: Container cilium-operator ready: true, restart count 0 -Jul 29 16:14:38.127: INFO: kube-addon-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.127: INFO: Container kube-addon-manager ready: true, restart count 0 -Jul 29 16:14:38.127: INFO: kube-apiserver-wetuj3nuajog-2 from kube-system started at 
2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.127: INFO: Container kube-apiserver ready: true, restart count 0 -Jul 29 16:14:38.127: INFO: kube-controller-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.127: INFO: Container kube-controller-manager ready: true, restart count 0 -Jul 29 16:14:38.127: INFO: kube-proxy-gzqkk from kube-system started at 2023-07-29 15:14:12 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.127: INFO: Container kube-proxy ready: true, restart count 0 -Jul 29 16:14:38.127: INFO: kube-scheduler-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.127: INFO: Container kube-scheduler ready: true, restart count 0 -Jul 29 16:14:38.127: INFO: condition-test-kq5nm from replication-controller-7805 started at 2023-07-29 16:14:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.127: INFO: Container httpd ready: false, restart count 0 -Jul 29 16:14:38.127: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:14:38.127: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:14:38.127: INFO: Container systemd-logs ready: true, restart count 0 -Jul 29 16:14:38.127: INFO: -Logging pods the apiserver thinks is on node wetuj3nuajog-3 before test -Jul 29 16:14:38.144: INFO: cilium-node-init-9ghzk from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.144: INFO: Container node-init ready: true, restart count 0 -Jul 29 16:14:38.144: INFO: cilium-v9c5p from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.144: INFO: Container cilium-agent ready: true, restart count 0 -Jul 29 16:14:38.145: INFO: kube-proxy-v77tx from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.145: INFO: Container kube-proxy ready: true, restart count 0 -Jul 29 16:14:38.145: INFO: condition-test-m795z from replication-controller-7805 started at 2023-07-29 16:14:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.145: INFO: Container httpd ready: true, restart count 0 -Jul 29 16:14:38.145: INFO: sonobuoy from sonobuoy started at 2023-07-29 15:28:59 +0000 UTC (1 container statuses recorded) -Jul 29 16:14:38.145: INFO: Container kube-sonobuoy ready: true, restart count 0 -Jul 29 16:14:38.145: INFO: sonobuoy-e2e-job-7bf00df102b6496e from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:14:38.145: INFO: Container e2e ready: true, restart count 0 -Jul 29 16:14:38.145: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:14:38.145: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:14:38.145: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:14:38.145: INFO: Container systemd-logs ready: true, restart count 0 -[It] validates that NodeSelector is respected if matching [Conformance] - test/e2e/scheduling/predicates.go:466 -STEP: Trying to launch a pod without a label to get a node which can launch it. 
07/29/23 16:14:38.146 -Jul 29 16:14:38.183: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-pred-8308" to be "running" -Jul 29 16:14:38.190: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 6.342886ms -Jul 29 16:14:40.199: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.015544446s -Jul 29 16:14:40.199: INFO: Pod "without-label" satisfied condition "running" -STEP: Explicitly delete pod here to free the resource it takes. 07/29/23 16:14:40.205 -STEP: Trying to apply a random label on the found node. 07/29/23 16:14:40.228 -STEP: verifying the node has the label kubernetes.io/e2e-542921a3-5d95-4ced-9a7e-0273695774b9 42 07/29/23 16:14:40.246 -STEP: Trying to relaunch the pod, now with labels. 07/29/23 16:14:40.258 -Jul 29 16:14:40.277: INFO: Waiting up to 5m0s for pod "with-labels" in namespace "sched-pred-8308" to be "not pending" -Jul 29 16:14:40.284: INFO: Pod "with-labels": Phase="Pending", Reason="", readiness=false. Elapsed: 6.948232ms -Jul 29 16:14:42.291: INFO: Pod "with-labels": Phase="Running", Reason="", readiness=true. Elapsed: 2.014643635s -Jul 29 16:14:42.292: INFO: Pod "with-labels" satisfied condition "not pending" -STEP: removing the label kubernetes.io/e2e-542921a3-5d95-4ced-9a7e-0273695774b9 off the node wetuj3nuajog-3 07/29/23 16:14:42.306 -STEP: verifying the node doesn't have the label kubernetes.io/e2e-542921a3-5d95-4ced-9a7e-0273695774b9 07/29/23 16:14:42.346 -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:174 +STEP: Creating configMap with name cm-test-opt-del-14023a48-ccee-45e5-b391-e68fb7f9b2b6 08/24/23 12:14:42.027 +STEP: Creating configMap with name cm-test-opt-upd-3d78a705-8deb-4f4a-b661-7f4f684a0426 08/24/23 12:14:42.044 +STEP: Creating the pod 08/24/23 12:14:42.053 +Aug 24 12:14:42.074: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a" in namespace "projected-909" to be "running and ready" +Aug 24 12:14:42.096: INFO: Pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a": Phase="Pending", Reason="", readiness=false. Elapsed: 21.802ms +Aug 24 12:14:42.096: INFO: The phase of Pod pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:14:44.105: INFO: Pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.031119716s +Aug 24 12:14:44.105: INFO: The phase of Pod pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:14:46.120: INFO: Pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.04552002s +Aug 24 12:14:46.120: INFO: The phase of Pod pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a is Running (Ready = true) +Aug 24 12:14:46.120: INFO: Pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a" satisfied condition "running and ready" +STEP: Deleting configmap cm-test-opt-del-14023a48-ccee-45e5-b391-e68fb7f9b2b6 08/24/23 12:14:46.19 +STEP: Updating configmap cm-test-opt-upd-3d78a705-8deb-4f4a-b661-7f4f684a0426 08/24/23 12:14:46.201 +STEP: Creating configMap with name cm-test-opt-create-42f9daf7-8da3-49eb-a9c0-98d14a7dedde 08/24/23 12:14:46.219 +STEP: waiting to observe update in volume 08/24/23 12:14:46.227 +[AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:42.367: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] - test/e2e/scheduling/predicates.go:88 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] +Aug 24 12:16:11.095: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] +[DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] +[DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 -STEP: Destroying namespace "sched-pred-8308" for this suite. 07/29/23 16:14:42.387 +STEP: Destroying namespace "projected-909" for this suite. 08/24/23 12:16:11.105 ------------------------------ -• [4.378 seconds] -[sig-scheduling] SchedulerPredicates [Serial] -test/e2e/scheduling/framework.go:40 - validates that NodeSelector is respected if matching [Conformance] - test/e2e/scheduling/predicates.go:466 +• [SLOW TEST] [89.149 seconds] +[sig-storage] Projected configMap +test/e2e/common/storage/framework.go:23 + optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:174 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + [BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:38.03 - Jul 29 16:14:38.031: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sched-pred 07/29/23 16:14:38.033 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:38.061 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:38.067 - [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + STEP: Creating a kubernetes client 08/24/23 12:14:41.97 + Aug 24 12:14:41.970: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:14:41.972 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:14:42.005 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:14:42.01 + [BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] - test/e2e/scheduling/predicates.go:97 - Jul 29 16:14:38.072: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready - Jul 29 16:14:38.087: INFO: Waiting for terminating namespaces to be 
deleted... - Jul 29 16:14:38.095: INFO: - Logging pods the apiserver thinks is on node wetuj3nuajog-1 before test - Jul 29 16:14:38.111: INFO: cilium-cdv47 from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container cilium-agent ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: cilium-node-init-jdrzm from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container node-init ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: coredns-787d4945fb-2xpvx from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container coredns ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: coredns-787d4945fb-clg7z from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container coredns ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: kube-addon-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container kube-addon-manager ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: kube-apiserver-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container kube-apiserver ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: kube-controller-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container kube-controller-manager ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: kube-proxy-zc9m8 from kube-system started at 2023-07-29 15:13:58 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container kube-proxy ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: kube-scheduler-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container kube-scheduler ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) - Jul 29 16:14:38.111: INFO: Container sonobuoy-worker ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: Container systemd-logs ready: true, restart count 0 - Jul 29 16:14:38.111: INFO: - Logging pods the apiserver thinks is on node wetuj3nuajog-2 before test - Jul 29 16:14:38.126: INFO: cilium-kxphw from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.126: INFO: Container cilium-agent ready: true, restart count 0 - Jul 29 16:14:38.126: INFO: cilium-node-init-fqx5t from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.126: INFO: Container node-init ready: true, restart count 0 - Jul 29 16:14:38.126: INFO: cilium-operator-8c499d9f6-hfgjd from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.127: INFO: Container cilium-operator ready: true, restart count 0 - Jul 29 16:14:38.127: INFO: kube-addon-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.127: INFO: Container kube-addon-manager ready: true, restart count 0 - Jul 29 16:14:38.127: INFO: 
kube-apiserver-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.127: INFO: Container kube-apiserver ready: true, restart count 0 - Jul 29 16:14:38.127: INFO: kube-controller-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.127: INFO: Container kube-controller-manager ready: true, restart count 0 - Jul 29 16:14:38.127: INFO: kube-proxy-gzqkk from kube-system started at 2023-07-29 15:14:12 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.127: INFO: Container kube-proxy ready: true, restart count 0 - Jul 29 16:14:38.127: INFO: kube-scheduler-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.127: INFO: Container kube-scheduler ready: true, restart count 0 - Jul 29 16:14:38.127: INFO: condition-test-kq5nm from replication-controller-7805 started at 2023-07-29 16:14:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.127: INFO: Container httpd ready: false, restart count 0 - Jul 29 16:14:38.127: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) - Jul 29 16:14:38.127: INFO: Container sonobuoy-worker ready: true, restart count 0 - Jul 29 16:14:38.127: INFO: Container systemd-logs ready: true, restart count 0 - Jul 29 16:14:38.127: INFO: - Logging pods the apiserver thinks is on node wetuj3nuajog-3 before test - Jul 29 16:14:38.144: INFO: cilium-node-init-9ghzk from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.144: INFO: Container node-init ready: true, restart count 0 - Jul 29 16:14:38.144: INFO: cilium-v9c5p from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.144: INFO: Container cilium-agent ready: true, restart count 0 - Jul 29 16:14:38.145: INFO: kube-proxy-v77tx from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.145: INFO: Container kube-proxy ready: true, restart count 0 - Jul 29 16:14:38.145: INFO: condition-test-m795z from replication-controller-7805 started at 2023-07-29 16:14:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.145: INFO: Container httpd ready: true, restart count 0 - Jul 29 16:14:38.145: INFO: sonobuoy from sonobuoy started at 2023-07-29 15:28:59 +0000 UTC (1 container statuses recorded) - Jul 29 16:14:38.145: INFO: Container kube-sonobuoy ready: true, restart count 0 - Jul 29 16:14:38.145: INFO: sonobuoy-e2e-job-7bf00df102b6496e from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) - Jul 29 16:14:38.145: INFO: Container e2e ready: true, restart count 0 - Jul 29 16:14:38.145: INFO: Container sonobuoy-worker ready: true, restart count 0 - Jul 29 16:14:38.145: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) - Jul 29 16:14:38.145: INFO: Container sonobuoy-worker ready: true, restart count 0 - Jul 29 16:14:38.145: INFO: Container systemd-logs ready: true, restart count 0 - [It] validates that NodeSelector is respected if matching [Conformance] - test/e2e/scheduling/predicates.go:466 - STEP: Trying to launch a pod without a label to get a node which can launch it. 
07/29/23 16:14:38.146 - Jul 29 16:14:38.183: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-pred-8308" to be "running" - Jul 29 16:14:38.190: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 6.342886ms - Jul 29 16:14:40.199: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.015544446s - Jul 29 16:14:40.199: INFO: Pod "without-label" satisfied condition "running" - STEP: Explicitly delete pod here to free the resource it takes. 07/29/23 16:14:40.205 - STEP: Trying to apply a random label on the found node. 07/29/23 16:14:40.228 - STEP: verifying the node has the label kubernetes.io/e2e-542921a3-5d95-4ced-9a7e-0273695774b9 42 07/29/23 16:14:40.246 - STEP: Trying to relaunch the pod, now with labels. 07/29/23 16:14:40.258 - Jul 29 16:14:40.277: INFO: Waiting up to 5m0s for pod "with-labels" in namespace "sched-pred-8308" to be "not pending" - Jul 29 16:14:40.284: INFO: Pod "with-labels": Phase="Pending", Reason="", readiness=false. Elapsed: 6.948232ms - Jul 29 16:14:42.291: INFO: Pod "with-labels": Phase="Running", Reason="", readiness=true. Elapsed: 2.014643635s - Jul 29 16:14:42.292: INFO: Pod "with-labels" satisfied condition "not pending" - STEP: removing the label kubernetes.io/e2e-542921a3-5d95-4ced-9a7e-0273695774b9 off the node wetuj3nuajog-3 07/29/23 16:14:42.306 - STEP: verifying the node doesn't have the label kubernetes.io/e2e-542921a3-5d95-4ced-9a7e-0273695774b9 07/29/23 16:14:42.346 - [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + [It] optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:174 + STEP: Creating configMap with name cm-test-opt-del-14023a48-ccee-45e5-b391-e68fb7f9b2b6 08/24/23 12:14:42.027 + STEP: Creating configMap with name cm-test-opt-upd-3d78a705-8deb-4f4a-b661-7f4f684a0426 08/24/23 12:14:42.044 + STEP: Creating the pod 08/24/23 12:14:42.053 + Aug 24 12:14:42.074: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a" in namespace "projected-909" to be "running and ready" + Aug 24 12:14:42.096: INFO: Pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a": Phase="Pending", Reason="", readiness=false. Elapsed: 21.802ms + Aug 24 12:14:42.096: INFO: The phase of Pod pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:14:44.105: INFO: Pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.031119716s + Aug 24 12:14:44.105: INFO: The phase of Pod pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:14:46.120: INFO: Pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.04552002s + Aug 24 12:14:46.120: INFO: The phase of Pod pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a is Running (Ready = true) + Aug 24 12:14:46.120: INFO: Pod "pod-projected-configmaps-791c183f-72c3-49a3-b28a-f68de8e46e8a" satisfied condition "running and ready" + STEP: Deleting configmap cm-test-opt-del-14023a48-ccee-45e5-b391-e68fb7f9b2b6 08/24/23 12:14:46.19 + STEP: Updating configmap cm-test-opt-upd-3d78a705-8deb-4f4a-b661-7f4f684a0426 08/24/23 12:14:46.201 + STEP: Creating configMap with name cm-test-opt-create-42f9daf7-8da3-49eb-a9c0-98d14a7dedde 08/24/23 12:14:46.219 + STEP: waiting to observe update in volume 08/24/23 12:14:46.227 + [AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:42.367: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] - test/e2e/scheduling/predicates.go:88 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] + Aug 24 12:16:11.095: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] + [DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] + [DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 - STEP: Destroying namespace "sched-pred-8308" for this suite. 07/29/23 16:14:42.387 + STEP: Destroying namespace "projected-909" for this suite. 08/24/23 12:16:11.105 << End Captured GinkgoWriter Output ------------------------------ -[sig-apps] Deployment - RollingUpdateDeployment should delete old pods and create new ones [Conformance] - test/e2e/apps/deployment.go:105 -[BeforeEach] [sig-apps] Deployment +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + updates the published spec when one version gets renamed [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:391 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:42.419 -Jul 29 16:14:42.421: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename deployment 07/29/23 16:14:42.428 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:42.461 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:42.47 -[BeforeEach] [sig-apps] Deployment +STEP: Creating a kubernetes client 08/24/23 12:16:11.137 +Aug 24 12:16:11.137: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:16:11.14 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:16:11.173 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:16:11.179 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 -[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] - test/e2e/apps/deployment.go:105 -Jul 29 16:14:42.478: INFO: Creating replica set 
"test-rolling-update-controller" (going to be adopted) -Jul 29 16:14:42.504: INFO: Pod name sample-pod: Found 0 pods out of 1 -Jul 29 16:14:47.515: INFO: Pod name sample-pod: Found 1 pods out of 1 -STEP: ensuring each pod is running 07/29/23 16:14:47.515 -Jul 29 16:14:47.516: INFO: Creating deployment "test-rolling-update-deployment" -Jul 29 16:14:47.542: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has -Jul 29 16:14:47.552: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created -Jul 29 16:14:49.587: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected -Jul 29 16:14:49.596: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted) -[AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 -Jul 29 16:14:49.621: INFO: Deployment "test-rolling-update-deployment": -&Deployment{ObjectMeta:{test-rolling-update-deployment deployment-2516 79e3a1c2-c27a-41a5-8fbf-a7a931c32541 20682 1 2023-07-29 16:14:47 +0000 UTC map[name:sample-pod] map[deployment.kubernetes.io/revision:3546343826724305833] [] [] [{e2e.test Update apps/v1 2023-07-29 16:14:47 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006aa6198 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] 
[]}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-07-29 16:14:47 +0000 UTC,LastTransitionTime:2023-07-29 16:14:47 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rolling-update-deployment-7549d9f46d" has successfully progressed.,LastUpdateTime:2023-07-29 16:14:49 +0000 UTC,LastTransitionTime:2023-07-29 16:14:47 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - -Jul 29 16:14:49.630: INFO: New ReplicaSet "test-rolling-update-deployment-7549d9f46d" of Deployment "test-rolling-update-deployment": -&ReplicaSet{ObjectMeta:{test-rolling-update-deployment-7549d9f46d deployment-2516 71591eb2-bd53-4573-97d3-c206de334970 20668 1 2023-07-29 16:14:47 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305833] [{apps/v1 Deployment test-rolling-update-deployment 79e3a1c2-c27a-41a5-8fbf-a7a931c32541 0xc006aa66a7 0xc006aa66a8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 16:14:47 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"79e3a1c2-c27a-41a5-8fbf-a7a931c32541\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 7549d9f46d,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006aa6758 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} -Jul 29 16:14:49.630: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": -Jul 29 16:14:49.631: INFO: &ReplicaSet{ObjectMeta:{test-rolling-update-controller deployment-2516 a3c78444-ce4c-4521-b028-b4ee16c90c56 20680 2 2023-07-29 16:14:42 +0000 UTC map[name:sample-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305832] [{apps/v1 Deployment test-rolling-update-deployment 79e3a1c2-c27a-41a5-8fbf-a7a931c32541 0xc006aa6577 0xc006aa6578}] [] [{e2e.test Update apps/v1 2023-07-29 16:14:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"79e3a1c2-c27a-41a5-8fbf-a7a931c32541\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc006aa6638 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Jul 29 16:14:49.640: INFO: Pod "test-rolling-update-deployment-7549d9f46d-b84wz" is available: -&Pod{ObjectMeta:{test-rolling-update-deployment-7549d9f46d-b84wz test-rolling-update-deployment-7549d9f46d- deployment-2516 ee4a763f-5aab-40f6-bd37-f0fac8666632 20667 0 2023-07-29 16:14:47 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[] [{apps/v1 ReplicaSet test-rolling-update-deployment-7549d9f46d 71591eb2-bd53-4573-97d3-c206de334970 0xc001e024d7 0xc001e024d8}] [] [{kube-controller-manager Update v1 2023-07-29 16:14:47 +0000 UTC 
FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"71591eb2-bd53-4573-97d3-c206de334970\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.177\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-pf725,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pf725,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:def
ault,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:14:47 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:14:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:14:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:14:47 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.177,StartTime:2023-07-29 16:14:47 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 16:14:48 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,ImageID:registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e,ContainerID:cri-o://3ef56ae695d9e47378152999057d60efd3f5cb762f8bfe23d8697b4911bf5082,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.177,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +[It] updates the published spec when one version gets renamed [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:391 +STEP: set up a multi version CRD 08/24/23 12:16:11.184 +Aug 24 12:16:11.185: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: rename a version 08/24/23 12:16:17.374 +STEP: check the new version name is served 08/24/23 12:16:17.405 +STEP: check the old version name is removed 08/24/23 12:16:20.248 +STEP: check the other version is not changed 08/24/23 12:16:21.148 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:49.640: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Deployment +Aug 24 12:16:25.850: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready 
+[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Deployment - tear down framework | framework.go:193 -STEP: Destroying namespace "deployment-2516" for this suite. 07/29/23 16:14:49.652 ------------------------------- -• [SLOW TEST] [7.257 seconds] -[sig-apps] Deployment -test/e2e/apps/framework.go:23 - RollingUpdateDeployment should delete old pods and create new ones [Conformance] - test/e2e/apps/deployment.go:105 +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + tear down framework | framework.go:193 +STEP: Destroying namespace "crd-publish-openapi-3412" for this suite. 08/24/23 12:16:25.869 +------------------------------ +• [SLOW TEST] [14.746 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + updates the published spec when one version gets renamed [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:391 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Deployment + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:42.419 - Jul 29 16:14:42.421: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename deployment 07/29/23 16:14:42.428 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:42.461 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:42.47 - [BeforeEach] [sig-apps] Deployment + STEP: Creating a kubernetes client 08/24/23 12:16:11.137 + Aug 24 12:16:11.137: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:16:11.14 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:16:11.173 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:16:11.179 + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 - [It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] - test/e2e/apps/deployment.go:105 - Jul 29 16:14:42.478: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted) - Jul 29 16:14:42.504: INFO: Pod name sample-pod: Found 0 pods out of 1 - Jul 29 16:14:47.515: INFO: Pod name sample-pod: Found 1 pods out of 1 - STEP: ensuring each pod is running 07/29/23 16:14:47.515 - Jul 29 16:14:47.516: INFO: Creating deployment "test-rolling-update-deployment" - Jul 29 16:14:47.542: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has - Jul 29 16:14:47.552: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created - Jul 29 16:14:49.587: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected - Jul 29 16:14:49.596: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted) - 
[AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 - Jul 29 16:14:49.621: INFO: Deployment "test-rolling-update-deployment": - &Deployment{ObjectMeta:{test-rolling-update-deployment deployment-2516 79e3a1c2-c27a-41a5-8fbf-a7a931c32541 20682 1 2023-07-29 16:14:47 +0000 UTC map[name:sample-pod] map[deployment.kubernetes.io/revision:3546343826724305833] [] [] [{e2e.test Update apps/v1 2023-07-29 16:14:47 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006aa6198 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-07-29 16:14:47 +0000 UTC,LastTransitionTime:2023-07-29 16:14:47 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rolling-update-deployment-7549d9f46d" has successfully progressed.,LastUpdateTime:2023-07-29 16:14:49 +0000 UTC,LastTransitionTime:2023-07-29 16:14:47 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - - Jul 
29 16:14:49.630: INFO: New ReplicaSet "test-rolling-update-deployment-7549d9f46d" of Deployment "test-rolling-update-deployment": - &ReplicaSet{ObjectMeta:{test-rolling-update-deployment-7549d9f46d deployment-2516 71591eb2-bd53-4573-97d3-c206de334970 20668 1 2023-07-29 16:14:47 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305833] [{apps/v1 Deployment test-rolling-update-deployment 79e3a1c2-c27a-41a5-8fbf-a7a931c32541 0xc006aa66a7 0xc006aa66a8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 16:14:47 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"79e3a1c2-c27a-41a5-8fbf-a7a931c32541\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 7549d9f46d,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006aa6758 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} - Jul 29 16:14:49.630: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": - Jul 29 16:14:49.631: INFO: &ReplicaSet{ObjectMeta:{test-rolling-update-controller deployment-2516 a3c78444-ce4c-4521-b028-b4ee16c90c56 20680 2 2023-07-29 16:14:42 +0000 UTC map[name:sample-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305832] [{apps/v1 Deployment test-rolling-update-deployment 79e3a1c2-c27a-41a5-8fbf-a7a931c32541 0xc006aa6577 0xc006aa6578}] [] [{e2e.test Update apps/v1 2023-07-29 16:14:42 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"79e3a1c2-c27a-41a5-8fbf-a7a931c32541\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc006aa6638 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - Jul 29 16:14:49.640: INFO: Pod "test-rolling-update-deployment-7549d9f46d-b84wz" is available: - &Pod{ObjectMeta:{test-rolling-update-deployment-7549d9f46d-b84wz test-rolling-update-deployment-7549d9f46d- deployment-2516 ee4a763f-5aab-40f6-bd37-f0fac8666632 20667 0 2023-07-29 16:14:47 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[] [{apps/v1 ReplicaSet test-rolling-update-deployment-7549d9f46d 71591eb2-bd53-4573-97d3-c206de334970 0xc001e024d7 0xc001e024d8}] [] [{kube-controller-manager Update v1 2023-07-29 16:14:47 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"71591eb2-bd53-4573-97d3-c206de334970\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 16:14:49 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.177\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-pf725,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pf725,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{K
ey:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:14:47 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:14:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:14:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:14:47 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.177,StartTime:2023-07-29 16:14:47 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 16:14:48 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,ImageID:registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e,ContainerID:cri-o://3ef56ae695d9e47378152999057d60efd3f5cb762f8bfe23d8697b4911bf5082,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.177,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - [AfterEach] [sig-apps] Deployment + [It] updates the published spec when one version gets renamed [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:391 + STEP: set up a multi version CRD 08/24/23 12:16:11.184 + Aug 24 12:16:11.185: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: rename a version 08/24/23 12:16:17.374 + STEP: check the new version name is served 08/24/23 12:16:17.405 + STEP: check the old version name is removed 08/24/23 12:16:20.248 + STEP: check the other version is not changed 08/24/23 12:16:21.148 + [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:49.640: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Deployment + Aug 24 12:16:25.850: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "deployment-2516" for this suite. 
07/29/23 16:14:49.652 + STEP: Destroying namespace "crd-publish-openapi-3412" for this suite. 08/24/23 12:16:25.869 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSSSSS ------------------------------ -[sig-node] ConfigMap - should be consumable via the environment [NodeConformance] [Conformance] - test/e2e/common/node/configmap.go:93 -[BeforeEach] [sig-node] ConfigMap +[sig-apps] CronJob + should not schedule jobs when suspended [Slow] [Conformance] + test/e2e/apps/cronjob.go:96 +[BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:49.674 -Jul 29 16:14:49.674: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:14:49.677 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:49.713 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:49.719 -[BeforeEach] [sig-node] ConfigMap +STEP: Creating a kubernetes client 08/24/23 12:16:25.888 +Aug 24 12:16:25.888: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename cronjob 08/24/23 12:16:25.891 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:16:25.921 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:16:25.926 +[BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable via the environment [NodeConformance] [Conformance] - test/e2e/common/node/configmap.go:93 -STEP: Creating configMap configmap-5215/configmap-test-cb5b0f3f-a58c-4107-8d0c-f2946bbe09a1 07/29/23 16:14:49.727 -STEP: Creating a pod to test consume configMaps 07/29/23 16:14:49.738 -Jul 29 16:14:49.756: INFO: Waiting up to 5m0s for pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693" in namespace "configmap-5215" to be "Succeeded or Failed" -Jul 29 16:14:49.767: INFO: Pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693": Phase="Pending", Reason="", readiness=false. Elapsed: 10.579501ms -Jul 29 16:14:51.775: INFO: Pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019170519s -Jul 29 16:14:53.776: INFO: Pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.020032247s -STEP: Saw pod success 07/29/23 16:14:53.776 -Jul 29 16:14:53.777: INFO: Pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693" satisfied condition "Succeeded or Failed" -Jul 29 16:14:53.783: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693 container env-test: -STEP: delete the pod 07/29/23 16:14:53.806 -Jul 29 16:14:53.827: INFO: Waiting for pod pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693 to disappear -Jul 29 16:14:53.834: INFO: Pod pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693 no longer exists -[AfterEach] [sig-node] ConfigMap +[It] should not schedule jobs when suspended [Slow] [Conformance] + test/e2e/apps/cronjob.go:96 +STEP: Creating a suspended cronjob 08/24/23 12:16:25.93 +STEP: Ensuring no jobs are scheduled 08/24/23 12:16:25.943 +STEP: Ensuring no job exists by listing jobs explicitly 08/24/23 12:21:25.958 +STEP: Removing cronjob 08/24/23 12:21:25.965 +[AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:53.835: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] ConfigMap +Aug 24 12:21:25.976: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] ConfigMap +[DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] ConfigMap +[DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-5215" for this suite. 07/29/23 16:14:53.845 +STEP: Destroying namespace "cronjob-9111" for this suite. 08/24/23 12:21:25.989 ------------------------------ -• [4.183 seconds] -[sig-node] ConfigMap -test/e2e/common/node/framework.go:23 - should be consumable via the environment [NodeConformance] [Conformance] - test/e2e/common/node/configmap.go:93 +• [SLOW TEST] [300.114 seconds] +[sig-apps] CronJob +test/e2e/apps/framework.go:23 + should not schedule jobs when suspended [Slow] [Conformance] + test/e2e/apps/cronjob.go:96 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] ConfigMap + [BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:49.674 - Jul 29 16:14:49.674: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:14:49.677 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:49.713 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:49.719 - [BeforeEach] [sig-node] ConfigMap + STEP: Creating a kubernetes client 08/24/23 12:16:25.888 + Aug 24 12:16:25.888: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename cronjob 08/24/23 12:16:25.891 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:16:25.921 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:16:25.926 + [BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable via the environment [NodeConformance] [Conformance] - test/e2e/common/node/configmap.go:93 - STEP: Creating configMap configmap-5215/configmap-test-cb5b0f3f-a58c-4107-8d0c-f2946bbe09a1 07/29/23 16:14:49.727 - STEP: Creating a pod to test consume configMaps 07/29/23 16:14:49.738 - Jul 29 16:14:49.756: INFO: Waiting up to 
5m0s for pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693" in namespace "configmap-5215" to be "Succeeded or Failed" - Jul 29 16:14:49.767: INFO: Pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693": Phase="Pending", Reason="", readiness=false. Elapsed: 10.579501ms - Jul 29 16:14:51.775: INFO: Pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019170519s - Jul 29 16:14:53.776: INFO: Pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020032247s - STEP: Saw pod success 07/29/23 16:14:53.776 - Jul 29 16:14:53.777: INFO: Pod "pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693" satisfied condition "Succeeded or Failed" - Jul 29 16:14:53.783: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693 container env-test: - STEP: delete the pod 07/29/23 16:14:53.806 - Jul 29 16:14:53.827: INFO: Waiting for pod pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693 to disappear - Jul 29 16:14:53.834: INFO: Pod pod-configmaps-f9185d3e-8b29-4844-96a3-a21924463693 no longer exists - [AfterEach] [sig-node] ConfigMap + [It] should not schedule jobs when suspended [Slow] [Conformance] + test/e2e/apps/cronjob.go:96 + STEP: Creating a suspended cronjob 08/24/23 12:16:25.93 + STEP: Ensuring no jobs are scheduled 08/24/23 12:16:25.943 + STEP: Ensuring no job exists by listing jobs explicitly 08/24/23 12:21:25.958 + STEP: Removing cronjob 08/24/23 12:21:25.965 + [AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:53.835: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] ConfigMap + Aug 24 12:21:25.976: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] ConfigMap + [DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] ConfigMap + [DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-5215" for this suite. 07/29/23 16:14:53.845 + STEP: Destroying namespace "cronjob-9111" for this suite. 
08/24/23 12:21:25.989 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-storage] EmptyDir volumes - should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:147 + should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:97 [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:53.858 -Jul 29 16:14:53.858: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 16:14:53.862 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:53.899 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:53.901 +STEP: Creating a kubernetes client 08/24/23 12:21:26.004 +Aug 24 12:21:26.004: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 12:21:26.007 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:26.037 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:26.041 [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:147 -STEP: Creating a pod to test emptydir 0777 on tmpfs 07/29/23 16:14:53.906 -Jul 29 16:14:53.919: INFO: Waiting up to 5m0s for pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672" in namespace "emptydir-6734" to be "Succeeded or Failed" -Jul 29 16:14:53.924: INFO: Pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672": Phase="Pending", Reason="", readiness=false. Elapsed: 4.933924ms -Jul 29 16:14:55.947: INFO: Pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027274802s -Jul 29 16:14:57.933: INFO: Pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013384112s -STEP: Saw pod success 07/29/23 16:14:57.933 -Jul 29 16:14:57.934: INFO: Pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672" satisfied condition "Succeeded or Failed" -Jul 29 16:14:57.940: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-47767a9e-d899-4bc2-81aa-0621cb811672 container test-container: -STEP: delete the pod 07/29/23 16:14:57.953 -Jul 29 16:14:57.978: INFO: Waiting for pod pod-47767a9e-d899-4bc2-81aa-0621cb811672 to disappear -Jul 29 16:14:57.985: INFO: Pod pod-47767a9e-d899-4bc2-81aa-0621cb811672 no longer exists +[It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:97 +STEP: Creating a pod to test emptydir 0644 on tmpfs 08/24/23 12:21:26.046 +Aug 24 12:21:26.063: INFO: Waiting up to 5m0s for pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c" in namespace "emptydir-9227" to be "Succeeded or Failed" +Aug 24 12:21:26.080: INFO: Pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c": Phase="Pending", Reason="", readiness=false. Elapsed: 16.770926ms +Aug 24 12:21:28.091: INFO: Pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027434586s +Aug 24 12:21:30.090: INFO: Pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.026609192s +STEP: Saw pod success 08/24/23 12:21:30.09 +Aug 24 12:21:30.091: INFO: Pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c" satisfied condition "Succeeded or Failed" +Aug 24 12:21:30.103: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c container test-container: +STEP: delete the pod 08/24/23 12:21:30.135 +Aug 24 12:21:30.165: INFO: Waiting for pod pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c to disappear +Aug 24 12:21:30.171: INFO: Pod pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c no longer exists [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 16:14:57.986: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:21:30.171: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-6734" for this suite. 07/29/23 16:14:57.994 +STEP: Destroying namespace "emptydir-9227" for this suite. 08/24/23 12:21:30.184 ------------------------------ -• [4.148 seconds] +• [4.191 seconds] [sig-storage] EmptyDir volumes test/e2e/common/storage/framework.go:23 - should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:147 + should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:97 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:53.858 - Jul 29 16:14:53.858: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 16:14:53.862 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:53.899 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:53.901 + STEP: Creating a kubernetes client 08/24/23 12:21:26.004 + Aug 24 12:21:26.004: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 12:21:26.007 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:26.037 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:26.041 [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:147 - STEP: Creating a pod to test emptydir 0777 on tmpfs 07/29/23 16:14:53.906 - Jul 29 16:14:53.919: INFO: Waiting up to 5m0s for pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672" in namespace "emptydir-6734" to be "Succeeded or Failed" - Jul 29 16:14:53.924: INFO: Pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672": Phase="Pending", Reason="", readiness=false. Elapsed: 4.933924ms - Jul 29 16:14:55.947: INFO: Pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027274802s - Jul 29 16:14:57.933: INFO: Pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.013384112s - STEP: Saw pod success 07/29/23 16:14:57.933 - Jul 29 16:14:57.934: INFO: Pod "pod-47767a9e-d899-4bc2-81aa-0621cb811672" satisfied condition "Succeeded or Failed" - Jul 29 16:14:57.940: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-47767a9e-d899-4bc2-81aa-0621cb811672 container test-container: - STEP: delete the pod 07/29/23 16:14:57.953 - Jul 29 16:14:57.978: INFO: Waiting for pod pod-47767a9e-d899-4bc2-81aa-0621cb811672 to disappear - Jul 29 16:14:57.985: INFO: Pod pod-47767a9e-d899-4bc2-81aa-0621cb811672 no longer exists + [It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:97 + STEP: Creating a pod to test emptydir 0644 on tmpfs 08/24/23 12:21:26.046 + Aug 24 12:21:26.063: INFO: Waiting up to 5m0s for pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c" in namespace "emptydir-9227" to be "Succeeded or Failed" + Aug 24 12:21:26.080: INFO: Pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c": Phase="Pending", Reason="", readiness=false. Elapsed: 16.770926ms + Aug 24 12:21:28.091: INFO: Pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027434586s + Aug 24 12:21:30.090: INFO: Pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.026609192s + STEP: Saw pod success 08/24/23 12:21:30.09 + Aug 24 12:21:30.091: INFO: Pod "pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c" satisfied condition "Succeeded or Failed" + Aug 24 12:21:30.103: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c container test-container: + STEP: delete the pod 08/24/23 12:21:30.135 + Aug 24 12:21:30.165: INFO: Waiting for pod pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c to disappear + Aug 24 12:21:30.171: INFO: Pod pod-4f8d555b-0c10-45e6-b3a8-73a1bf2fce0c no longer exists [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 16:14:57.986: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:21:30.171: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-6734" for this suite. 
07/29/23 16:14:57.994 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-apps] ReplicationController - should serve a basic image on each replica with a public image [Conformance] - test/e2e/apps/rc.go:67 -[BeforeEach] [sig-apps] ReplicationController - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:14:58.025 -Jul 29 16:14:58.025: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replication-controller 07/29/23 16:14:58.028 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:58.067 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:58.071 -[BeforeEach] [sig-apps] ReplicationController - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 -[It] should serve a basic image on each replica with a public image [Conformance] - test/e2e/apps/rc.go:67 -STEP: Creating replication controller my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b 07/29/23 16:14:58.076 -Jul 29 16:14:58.089: INFO: Pod name my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b: Found 0 pods out of 1 -Jul 29 16:15:03.096: INFO: Pod name my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b: Found 1 pods out of 1 -Jul 29 16:15:03.096: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b" are running -Jul 29 16:15:03.096: INFO: Waiting up to 5m0s for pod "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq" in namespace "replication-controller-852" to be "running" -Jul 29 16:15:03.102: INFO: Pod "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq": Phase="Running", Reason="", readiness=true. 
Elapsed: 5.56591ms -Jul 29 16:15:03.102: INFO: Pod "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq" satisfied condition "running" -Jul 29 16:15:03.102: INFO: Pod "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:14:58 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:14:59 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:14:59 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:14:58 +0000 UTC Reason: Message:}]) -Jul 29 16:15:03.102: INFO: Trying to dial the pod -Jul 29 16:15:08.128: INFO: Controller my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b: Got expected result from replica 1 [my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq]: "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq", 1 of 1 required successes so far -[AfterEach] [sig-apps] ReplicationController - test/e2e/framework/node/init/init.go:32 -Jul 29 16:15:08.129: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] ReplicationController - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] ReplicationController - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] ReplicationController - tear down framework | framework.go:193 -STEP: Destroying namespace "replication-controller-852" for this suite. 07/29/23 16:15:08.14 ------------------------------- -• [SLOW TEST] [10.128 seconds] -[sig-apps] ReplicationController -test/e2e/apps/framework.go:23 - should serve a basic image on each replica with a public image [Conformance] - test/e2e/apps/rc.go:67 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] ReplicationController - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:14:58.025 - Jul 29 16:14:58.025: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replication-controller 07/29/23 16:14:58.028 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:14:58.067 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:14:58.071 - [BeforeEach] [sig-apps] ReplicationController - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 - [It] should serve a basic image on each replica with a public image [Conformance] - test/e2e/apps/rc.go:67 - STEP: Creating replication controller my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b 07/29/23 16:14:58.076 - Jul 29 16:14:58.089: INFO: Pod name my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b: Found 0 pods out of 1 - Jul 29 16:15:03.096: INFO: Pod name my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b: Found 1 pods out of 1 - Jul 29 16:15:03.096: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b" are running - Jul 29 16:15:03.096: INFO: Waiting up to 5m0s for pod "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq" in namespace "replication-controller-852" to be "running" - Jul 29 16:15:03.102: INFO: Pod "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq": 
Phase="Running", Reason="", readiness=true. Elapsed: 5.56591ms - Jul 29 16:15:03.102: INFO: Pod "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq" satisfied condition "running" - Jul 29 16:15:03.102: INFO: Pod "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:14:58 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:14:59 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:14:59 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:14:58 +0000 UTC Reason: Message:}]) - Jul 29 16:15:03.102: INFO: Trying to dial the pod - Jul 29 16:15:08.128: INFO: Controller my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b: Got expected result from replica 1 [my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq]: "my-hostname-basic-14c1004f-9a42-4f98-840a-3d48e74b336b-89dxq", 1 of 1 required successes so far - [AfterEach] [sig-apps] ReplicationController - test/e2e/framework/node/init/init.go:32 - Jul 29 16:15:08.129: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] ReplicationController - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] ReplicationController - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] ReplicationController - tear down framework | framework.go:193 - STEP: Destroying namespace "replication-controller-852" for this suite. 07/29/23 16:15:08.14 + STEP: Destroying namespace "emptydir-9227" for this suite. 
08/24/23 12:21:30.184 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Secrets - should be consumable from pods in env vars [NodeConformance] [Conformance] - test/e2e/common/node/secrets.go:46 -[BeforeEach] [sig-node] Secrets +[sig-node] Security Context When creating a pod with readOnlyRootFilesystem + should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:486 +[BeforeEach] [sig-node] Security Context set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:15:08.155 -Jul 29 16:15:08.155: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename secrets 07/29/23 16:15:08.159 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:08.196 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:08.203 -[BeforeEach] [sig-node] Secrets +STEP: Creating a kubernetes client 08/24/23 12:21:30.215 +Aug 24 12:21:30.215: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename security-context-test 08/24/23 12:21:30.218 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:30.266 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:30.276 +[BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in env vars [NodeConformance] [Conformance] - test/e2e/common/node/secrets.go:46 -STEP: Creating secret with name secret-test-39bbb299-fd85-4822-ac56-d119ab33d5be 07/29/23 16:15:08.209 -STEP: Creating a pod to test consume secrets 07/29/23 16:15:08.218 -Jul 29 16:15:08.231: INFO: Waiting up to 5m0s for pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9" in namespace "secrets-7118" to be "Succeeded or Failed" -Jul 29 16:15:08.239: INFO: Pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9": Phase="Pending", Reason="", readiness=false. Elapsed: 8.179301ms -Jul 29 16:15:10.248: INFO: Pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01625112s -Jul 29 16:15:12.249: INFO: Pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017843784s -STEP: Saw pod success 07/29/23 16:15:12.249 -Jul 29 16:15:12.250: INFO: Pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9" satisfied condition "Succeeded or Failed" -Jul 29 16:15:12.257: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9 container secret-env-test: -STEP: delete the pod 07/29/23 16:15:12.275 -Jul 29 16:15:12.301: INFO: Waiting for pod pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9 to disappear -Jul 29 16:15:12.316: INFO: Pod pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9 no longer exists -[AfterEach] [sig-node] Secrets +[BeforeEach] [sig-node] Security Context + test/e2e/common/node/security_context.go:50 +[It] should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:486 +Aug 24 12:21:30.299: INFO: Waiting up to 5m0s for pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c" in namespace "security-context-test-3578" to be "Succeeded or Failed" +Aug 24 12:21:30.305: INFO: Pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c": Phase="Pending", Reason="", readiness=false. Elapsed: 6.279307ms +Aug 24 12:21:32.316: INFO: Pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017188845s +Aug 24 12:21:34.313: INFO: Pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01349695s +Aug 24 12:21:34.313: INFO: Pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c" satisfied condition "Succeeded or Failed" +[AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 -Jul 29 16:15:12.316: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Secrets +Aug 24 12:21:34.313: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Secrets +[DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Secrets +[DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-7118" for this suite. 07/29/23 16:15:12.325 +STEP: Destroying namespace "security-context-test-3578" for this suite. 
08/24/23 12:21:34.323 ------------------------------ -• [4.185 seconds] -[sig-node] Secrets +• [4.119 seconds] +[sig-node] Security Context test/e2e/common/node/framework.go:23 - should be consumable from pods in env vars [NodeConformance] [Conformance] - test/e2e/common/node/secrets.go:46 + When creating a pod with readOnlyRootFilesystem + test/e2e/common/node/security_context.go:430 + should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:486 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Secrets + [BeforeEach] [sig-node] Security Context set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:15:08.155 - Jul 29 16:15:08.155: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 16:15:08.159 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:08.196 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:08.203 - [BeforeEach] [sig-node] Secrets + STEP: Creating a kubernetes client 08/24/23 12:21:30.215 + Aug 24 12:21:30.215: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename security-context-test 08/24/23 12:21:30.218 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:30.266 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:30.276 + [BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in env vars [NodeConformance] [Conformance] - test/e2e/common/node/secrets.go:46 - STEP: Creating secret with name secret-test-39bbb299-fd85-4822-ac56-d119ab33d5be 07/29/23 16:15:08.209 - STEP: Creating a pod to test consume secrets 07/29/23 16:15:08.218 - Jul 29 16:15:08.231: INFO: Waiting up to 5m0s for pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9" in namespace "secrets-7118" to be "Succeeded or Failed" - Jul 29 16:15:08.239: INFO: Pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9": Phase="Pending", Reason="", readiness=false. Elapsed: 8.179301ms - Jul 29 16:15:10.248: INFO: Pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01625112s - Jul 29 16:15:12.249: INFO: Pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017843784s - STEP: Saw pod success 07/29/23 16:15:12.249 - Jul 29 16:15:12.250: INFO: Pod "pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9" satisfied condition "Succeeded or Failed" - Jul 29 16:15:12.257: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9 container secret-env-test: - STEP: delete the pod 07/29/23 16:15:12.275 - Jul 29 16:15:12.301: INFO: Waiting for pod pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9 to disappear - Jul 29 16:15:12.316: INFO: Pod pod-secrets-420b3215-70dd-4515-9f47-704fc9d550d9 no longer exists - [AfterEach] [sig-node] Secrets + [BeforeEach] [sig-node] Security Context + test/e2e/common/node/security_context.go:50 + [It] should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:486 + Aug 24 12:21:30.299: INFO: Waiting up to 5m0s for pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c" in namespace "security-context-test-3578" to be "Succeeded or Failed" + Aug 24 12:21:30.305: INFO: Pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c": Phase="Pending", Reason="", readiness=false. Elapsed: 6.279307ms + Aug 24 12:21:32.316: INFO: Pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017188845s + Aug 24 12:21:34.313: INFO: Pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01349695s + Aug 24 12:21:34.313: INFO: Pod "busybox-readonly-false-6afcca8f-3a05-4cf4-8302-707b858cb73c" satisfied condition "Succeeded or Failed" + [AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 - Jul 29 16:15:12.316: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Secrets + Aug 24 12:21:34.313: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Secrets + [DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Secrets + [DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-7118" for this suite. 07/29/23 16:15:12.325 + STEP: Destroying namespace "security-context-test-3578" for this suite. 
08/24/23 12:21:34.323 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl diff - should check if kubectl diff finds a difference for Deployments [Conformance] - test/e2e/kubectl/kubectl.go:931 -[BeforeEach] [sig-cli] Kubectl client +[sig-network] EndpointSlice + should support creating EndpointSlice API operations [Conformance] + test/e2e/network/endpointslice.go:353 +[BeforeEach] [sig-network] EndpointSlice set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:15:12.344 -Jul 29 16:15:12.344: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:15:12.348 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:12.394 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:12.4 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 12:21:34.337 +Aug 24 12:21:34.337: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename endpointslice 08/24/23 12:21:34.339 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:34.368 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:34.373 +[BeforeEach] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[It] should check if kubectl diff finds a difference for Deployments [Conformance] - test/e2e/kubectl/kubectl.go:931 -STEP: create deployment with httpd image 07/29/23 16:15:12.405 -Jul 29 16:15:12.406: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2155 create -f -' -Jul 29 16:15:13.777: INFO: stderr: "" -Jul 29 16:15:13.777: INFO: stdout: "deployment.apps/httpd-deployment created\n" -STEP: verify diff finds difference between live and declared image 07/29/23 16:15:13.777 -Jul 29 16:15:13.778: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2155 diff -f -' -Jul 29 16:15:14.434: INFO: rc: 1 -Jul 29 16:15:14.434: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2155 delete -f -' -Jul 29 16:15:14.741: INFO: stderr: "" -Jul 29 16:15:14.741: INFO: stdout: "deployment.apps \"httpd-deployment\" deleted\n" -[AfterEach] [sig-cli] Kubectl client +[BeforeEach] [sig-network] EndpointSlice + test/e2e/network/endpointslice.go:52 +[It] should support creating EndpointSlice API operations [Conformance] + test/e2e/network/endpointslice.go:353 +STEP: getting /apis 08/24/23 12:21:34.38 +STEP: getting /apis/discovery.k8s.io 08/24/23 12:21:34.385 +STEP: getting /apis/discovery.k8s.iov1 08/24/23 12:21:34.387 +STEP: creating 08/24/23 12:21:34.39 +STEP: getting 08/24/23 12:21:34.416 +STEP: listing 08/24/23 12:21:34.421 +STEP: watching 08/24/23 12:21:34.427 +Aug 24 12:21:34.427: INFO: starting watch +STEP: cluster-wide listing 08/24/23 12:21:34.43 +STEP: cluster-wide watching 08/24/23 12:21:34.437 +Aug 24 12:21:34.437: INFO: starting watch +STEP: patching 08/24/23 12:21:34.443 +STEP: updating 08/24/23 12:21:34.452 +Aug 24 12:21:34.465: INFO: waiting for watch events with expected annotations +Aug 24 12:21:34.466: INFO: saw patched and updated annotations +STEP: deleting 08/24/23 12:21:34.466 +STEP: deleting a collection 08/24/23 
12:21:34.491 +[AfterEach] [sig-network] EndpointSlice test/e2e/framework/node/init/init.go:32 -Jul 29 16:15:14.741: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 12:21:34.519: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-network] EndpointSlice dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-network] EndpointSlice tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-2155" for this suite. 07/29/23 16:15:14.753 +STEP: Destroying namespace "endpointslice-5025" for this suite. 08/24/23 12:21:34.527 ------------------------------ -• [2.423 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Kubectl diff - test/e2e/kubectl/kubectl.go:925 - should check if kubectl diff finds a difference for Deployments [Conformance] - test/e2e/kubectl/kubectl.go:931 +• [0.201 seconds] +[sig-network] EndpointSlice +test/e2e/network/common/framework.go:23 + should support creating EndpointSlice API operations [Conformance] + test/e2e/network/endpointslice.go:353 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-network] EndpointSlice set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:15:12.344 - Jul 29 16:15:12.344: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 16:15:12.348 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:12.394 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:12.4 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 12:21:34.337 + Aug 24 12:21:34.337: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename endpointslice 08/24/23 12:21:34.339 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:34.368 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:34.373 + [BeforeEach] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [It] should check if kubectl diff finds a difference for Deployments [Conformance] - test/e2e/kubectl/kubectl.go:931 - STEP: create deployment with httpd image 07/29/23 16:15:12.405 - Jul 29 16:15:12.406: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2155 create -f -' - Jul 29 16:15:13.777: INFO: stderr: "" - Jul 29 16:15:13.777: INFO: stdout: "deployment.apps/httpd-deployment created\n" - STEP: verify diff finds difference between live and declared image 07/29/23 16:15:13.777 - Jul 29 16:15:13.778: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2155 diff -f -' - Jul 29 16:15:14.434: INFO: rc: 1 - Jul 29 16:15:14.434: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2155 delete -f -' - Jul 29 16:15:14.741: INFO: stderr: "" - Jul 29 16:15:14.741: INFO: stdout: "deployment.apps \"httpd-deployment\" deleted\n" - [AfterEach] [sig-cli] Kubectl client + [BeforeEach] [sig-network] EndpointSlice + 
test/e2e/network/endpointslice.go:52 + [It] should support creating EndpointSlice API operations [Conformance] + test/e2e/network/endpointslice.go:353 + STEP: getting /apis 08/24/23 12:21:34.38 + STEP: getting /apis/discovery.k8s.io 08/24/23 12:21:34.385 + STEP: getting /apis/discovery.k8s.iov1 08/24/23 12:21:34.387 + STEP: creating 08/24/23 12:21:34.39 + STEP: getting 08/24/23 12:21:34.416 + STEP: listing 08/24/23 12:21:34.421 + STEP: watching 08/24/23 12:21:34.427 + Aug 24 12:21:34.427: INFO: starting watch + STEP: cluster-wide listing 08/24/23 12:21:34.43 + STEP: cluster-wide watching 08/24/23 12:21:34.437 + Aug 24 12:21:34.437: INFO: starting watch + STEP: patching 08/24/23 12:21:34.443 + STEP: updating 08/24/23 12:21:34.452 + Aug 24 12:21:34.465: INFO: waiting for watch events with expected annotations + Aug 24 12:21:34.466: INFO: saw patched and updated annotations + STEP: deleting 08/24/23 12:21:34.466 + STEP: deleting a collection 08/24/23 12:21:34.491 + [AfterEach] [sig-network] EndpointSlice test/e2e/framework/node/init/init.go:32 - Jul 29 16:15:14.741: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 12:21:34.519: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-network] EndpointSlice dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-network] EndpointSlice tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-2155" for this suite. 07/29/23 16:15:14.753 + STEP: Destroying namespace "endpointslice-5025" for this suite. 
08/24/23 12:21:34.527 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSS ------------------------------ -[sig-node] Pods Extended Pods Set QOS Class - should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] - test/e2e/node/pods.go:161 -[BeforeEach] [sig-node] Pods Extended +[sig-scheduling] LimitRange + should list, patch and delete a LimitRange by collection [Conformance] + test/e2e/scheduling/limit_range.go:239 +[BeforeEach] [sig-scheduling] LimitRange set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:15:14.769 -Jul 29 16:15:14.769: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 16:15:14.78 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:14.84 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:14.846 -[BeforeEach] [sig-node] Pods Extended +STEP: Creating a kubernetes client 08/24/23 12:21:34.538 +Aug 24 12:21:34.538: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename limitrange 08/24/23 12:21:34.54 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:34.568 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:34.573 +[BeforeEach] [sig-scheduling] LimitRange test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] Pods Set QOS Class - test/e2e/node/pods.go:152 -[It] should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] - test/e2e/node/pods.go:161 -STEP: creating the pod 07/29/23 16:15:14.851 -STEP: submitting the pod to kubernetes 07/29/23 16:15:14.852 -STEP: verifying QOS class is set on the pod 07/29/23 16:15:14.871 -[AfterEach] [sig-node] Pods Extended +[It] should list, patch and delete a LimitRange by collection [Conformance] + test/e2e/scheduling/limit_range.go:239 +STEP: Creating LimitRange "e2e-limitrange-c5ttz" in namespace "limitrange-8653" 08/24/23 12:21:34.577 +STEP: Creating another limitRange in another namespace 08/24/23 12:21:34.587 +Aug 24 12:21:34.620: INFO: Namespace "e2e-limitrange-c5ttz-3029" created +Aug 24 12:21:34.620: INFO: Creating LimitRange "e2e-limitrange-c5ttz" in namespace "e2e-limitrange-c5ttz-3029" +STEP: Listing all LimitRanges with label "e2e-test=e2e-limitrange-c5ttz" 08/24/23 12:21:34.631 +Aug 24 12:21:34.639: INFO: Found 2 limitRanges +STEP: Patching LimitRange "e2e-limitrange-c5ttz" in "limitrange-8653" namespace 08/24/23 12:21:34.639 +Aug 24 12:21:34.656: INFO: LimitRange "e2e-limitrange-c5ttz" has been patched +STEP: Delete LimitRange "e2e-limitrange-c5ttz" by Collection with labelSelector: "e2e-limitrange-c5ttz=patched" 08/24/23 12:21:34.656 +STEP: Confirm that the limitRange "e2e-limitrange-c5ttz" has been deleted 08/24/23 12:21:34.68 +Aug 24 12:21:34.681: INFO: Requesting list of LimitRange to confirm quantity +Aug 24 12:21:34.686: INFO: Found 0 LimitRange with label "e2e-limitrange-c5ttz=patched" +Aug 24 12:21:34.686: INFO: LimitRange "e2e-limitrange-c5ttz" has been deleted. 
+STEP: Confirm that a single LimitRange still exists with label "e2e-test=e2e-limitrange-c5ttz" 08/24/23 12:21:34.686 +Aug 24 12:21:34.693: INFO: Found 1 limitRange +[AfterEach] [sig-scheduling] LimitRange test/e2e/framework/node/init/init.go:32 -Jul 29 16:15:14.884: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods Extended +Aug 24 12:21:34.693: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-scheduling] LimitRange test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods Extended +[DeferCleanup (Each)] [sig-scheduling] LimitRange dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods Extended +[DeferCleanup (Each)] [sig-scheduling] LimitRange tear down framework | framework.go:193 -STEP: Destroying namespace "pods-3216" for this suite. 07/29/23 16:15:14.902 +STEP: Destroying namespace "limitrange-8653" for this suite. 08/24/23 12:21:34.702 +STEP: Destroying namespace "e2e-limitrange-c5ttz-3029" for this suite. 08/24/23 12:21:34.712 ------------------------------ -• [0.145 seconds] -[sig-node] Pods Extended -test/e2e/node/framework.go:23 - Pods Set QOS Class - test/e2e/node/pods.go:150 - should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] - test/e2e/node/pods.go:161 +• [0.182 seconds] +[sig-scheduling] LimitRange +test/e2e/scheduling/framework.go:40 + should list, patch and delete a LimitRange by collection [Conformance] + test/e2e/scheduling/limit_range.go:239 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods Extended + [BeforeEach] [sig-scheduling] LimitRange set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:15:14.769 - Jul 29 16:15:14.769: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 16:15:14.78 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:14.84 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:14.846 - [BeforeEach] [sig-node] Pods Extended + STEP: Creating a kubernetes client 08/24/23 12:21:34.538 + Aug 24 12:21:34.538: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename limitrange 08/24/23 12:21:34.54 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:34.568 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:34.573 + [BeforeEach] [sig-scheduling] LimitRange test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] Pods Set QOS Class - test/e2e/node/pods.go:152 - [It] should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] - test/e2e/node/pods.go:161 - STEP: creating the pod 07/29/23 16:15:14.851 - STEP: submitting the pod to kubernetes 07/29/23 16:15:14.852 - STEP: verifying QOS class is set on the pod 07/29/23 16:15:14.871 - [AfterEach] [sig-node] Pods Extended + [It] should list, patch and delete a LimitRange by collection [Conformance] + test/e2e/scheduling/limit_range.go:239 + STEP: Creating LimitRange "e2e-limitrange-c5ttz" in namespace "limitrange-8653" 08/24/23 12:21:34.577 + STEP: Creating another limitRange in another namespace 08/24/23 12:21:34.587 + Aug 24 12:21:34.620: INFO: Namespace "e2e-limitrange-c5ttz-3029" created + Aug 24 12:21:34.620: INFO: Creating LimitRange "e2e-limitrange-c5ttz" in namespace "e2e-limitrange-c5ttz-3029" + STEP: 
Listing all LimitRanges with label "e2e-test=e2e-limitrange-c5ttz" 08/24/23 12:21:34.631 + Aug 24 12:21:34.639: INFO: Found 2 limitRanges + STEP: Patching LimitRange "e2e-limitrange-c5ttz" in "limitrange-8653" namespace 08/24/23 12:21:34.639 + Aug 24 12:21:34.656: INFO: LimitRange "e2e-limitrange-c5ttz" has been patched + STEP: Delete LimitRange "e2e-limitrange-c5ttz" by Collection with labelSelector: "e2e-limitrange-c5ttz=patched" 08/24/23 12:21:34.656 + STEP: Confirm that the limitRange "e2e-limitrange-c5ttz" has been deleted 08/24/23 12:21:34.68 + Aug 24 12:21:34.681: INFO: Requesting list of LimitRange to confirm quantity + Aug 24 12:21:34.686: INFO: Found 0 LimitRange with label "e2e-limitrange-c5ttz=patched" + Aug 24 12:21:34.686: INFO: LimitRange "e2e-limitrange-c5ttz" has been deleted. + STEP: Confirm that a single LimitRange still exists with label "e2e-test=e2e-limitrange-c5ttz" 08/24/23 12:21:34.686 + Aug 24 12:21:34.693: INFO: Found 1 limitRange + [AfterEach] [sig-scheduling] LimitRange test/e2e/framework/node/init/init.go:32 - Jul 29 16:15:14.884: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods Extended + Aug 24 12:21:34.693: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-scheduling] LimitRange test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods Extended + [DeferCleanup (Each)] [sig-scheduling] LimitRange dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods Extended + [DeferCleanup (Each)] [sig-scheduling] LimitRange tear down framework | framework.go:193 - STEP: Destroying namespace "pods-3216" for this suite. 07/29/23 16:15:14.902 + STEP: Destroying namespace "limitrange-8653" for this suite. 08/24/23 12:21:34.702 + STEP: Destroying namespace "e2e-limitrange-c5ttz-3029" for this suite. 
08/24/23 12:21:34.712 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should find a service from listing all namespaces [Conformance] - test/e2e/network/service.go:3219 -[BeforeEach] [sig-network] Services +[sig-apps] ReplicationController + should adopt matching pods on creation [Conformance] + test/e2e/apps/rc.go:92 +[BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:15:14.916 -Jul 29 16:15:14.916: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 16:15:14.925 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:15.033 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:15.037 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 12:21:34.738 +Aug 24 12:21:34.738: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replication-controller 08/24/23 12:21:34.74 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:34.776 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:34.78 +[BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should find a service from listing all namespaces [Conformance] - test/e2e/network/service.go:3219 -STEP: fetching services 07/29/23 16:15:15.042 -[AfterEach] [sig-network] Services +[BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 +[It] should adopt matching pods on creation [Conformance] + test/e2e/apps/rc.go:92 +STEP: Given a Pod with a 'name' label pod-adoption is created 08/24/23 12:21:34.787 +Aug 24 12:21:34.803: INFO: Waiting up to 5m0s for pod "pod-adoption" in namespace "replication-controller-6780" to be "running and ready" +Aug 24 12:21:34.813: INFO: Pod "pod-adoption": Phase="Pending", Reason="", readiness=false. Elapsed: 9.445319ms +Aug 24 12:21:34.813: INFO: The phase of Pod pod-adoption is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:21:36.824: INFO: Pod "pod-adoption": Phase="Running", Reason="", readiness=true. Elapsed: 2.020611273s +Aug 24 12:21:36.824: INFO: The phase of Pod pod-adoption is Running (Ready = true) +Aug 24 12:21:36.824: INFO: Pod "pod-adoption" satisfied condition "running and ready" +STEP: When a replication controller with a matching selector is created 08/24/23 12:21:36.83 +STEP: Then the orphan pod is adopted 08/24/23 12:21:36.844 +[AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 -Jul 29 16:15:15.049: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 12:21:37.859: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 -STEP: Destroying namespace "services-3504" for this suite. 
07/29/23 16:15:15.057 +STEP: Destroying namespace "replication-controller-6780" for this suite. 08/24/23 12:21:37.869 ------------------------------ -• [0.152 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should find a service from listing all namespaces [Conformance] - test/e2e/network/service.go:3219 +• [3.142 seconds] +[sig-apps] ReplicationController +test/e2e/apps/framework.go:23 + should adopt matching pods on creation [Conformance] + test/e2e/apps/rc.go:92 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:15:14.916 - Jul 29 16:15:14.916: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 16:15:14.925 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:15.033 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:15.037 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 12:21:34.738 + Aug 24 12:21:34.738: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replication-controller 08/24/23 12:21:34.74 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:34.776 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:34.78 + [BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should find a service from listing all namespaces [Conformance] - test/e2e/network/service.go:3219 - STEP: fetching services 07/29/23 16:15:15.042 - [AfterEach] [sig-network] Services + [BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 + [It] should adopt matching pods on creation [Conformance] + test/e2e/apps/rc.go:92 + STEP: Given a Pod with a 'name' label pod-adoption is created 08/24/23 12:21:34.787 + Aug 24 12:21:34.803: INFO: Waiting up to 5m0s for pod "pod-adoption" in namespace "replication-controller-6780" to be "running and ready" + Aug 24 12:21:34.813: INFO: Pod "pod-adoption": Phase="Pending", Reason="", readiness=false. Elapsed: 9.445319ms + Aug 24 12:21:34.813: INFO: The phase of Pod pod-adoption is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:21:36.824: INFO: Pod "pod-adoption": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.020611273s + Aug 24 12:21:36.824: INFO: The phase of Pod pod-adoption is Running (Ready = true) + Aug 24 12:21:36.824: INFO: Pod "pod-adoption" satisfied condition "running and ready" + STEP: When a replication controller with a matching selector is created 08/24/23 12:21:36.83 + STEP: Then the orphan pod is adopted 08/24/23 12:21:36.844 + [AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 - Jul 29 16:15:15.049: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 12:21:37.859: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 - STEP: Destroying namespace "services-3504" for this suite. 07/29/23 16:15:15.057 + STEP: Destroying namespace "replication-controller-6780" for this suite. 08/24/23 12:21:37.869 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Namespaces [Serial] - should apply an update to a Namespace [Conformance] - test/e2e/apimachinery/namespace.go:366 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +[sig-cli] Kubectl client Guestbook application + should create and stop a working application [Conformance] + test/e2e/kubectl/kubectl.go:394 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:15:15.073 -Jul 29 16:15:15.073: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename namespaces 07/29/23 16:15:15.077 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:15.124 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:15.131 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +STEP: Creating a kubernetes client 08/24/23 12:21:37.888 +Aug 24 12:21:37.888: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:21:37.89 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:37.917 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:37.922 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[It] should apply an update to a Namespace [Conformance] - test/e2e/apimachinery/namespace.go:366 -STEP: Updating Namespace "namespaces-4016" 07/29/23 16:15:15.139 -Jul 29 16:15:15.157: INFO: Namespace "namespaces-4016" now has labels, map[string]string{"e2e-framework":"namespaces", "e2e-run":"d0d188fc-d094-4f2b-8739-c618e26462b8", "kubernetes.io/metadata.name":"namespaces-4016", "namespaces-4016":"updated", "pod-security.kubernetes.io/enforce":"baseline"} -[AfterEach] [sig-api-machinery] Namespaces [Serial] - test/e2e/framework/node/init/init.go:32 -Jul 29 16:15:15.157: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] - dump namespaces | framework.go:196 -[DeferCleanup 
(Each)] [sig-api-machinery] Namespaces [Serial] - tear down framework | framework.go:193 -STEP: Destroying namespace "namespaces-4016" for this suite. 07/29/23 16:15:15.171 ------------------------------- -• [0.110 seconds] -[sig-api-machinery] Namespaces [Serial] -test/e2e/apimachinery/framework.go:23 - should apply an update to a Namespace [Conformance] - test/e2e/apimachinery/namespace.go:366 +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[It] should create and stop a working application [Conformance] + test/e2e/kubectl/kubectl.go:394 +STEP: creating all guestbook components 08/24/23 12:21:37.928 +Aug 24 12:21:37.928: INFO: apiVersion: v1 +kind: Service +metadata: + name: agnhost-replica + labels: + app: agnhost + role: replica + tier: backend +spec: + ports: + - port: 6379 + selector: + app: agnhost + role: replica + tier: backend + +Aug 24 12:21:37.929: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' +Aug 24 12:21:38.564: INFO: stderr: "" +Aug 24 12:21:38.564: INFO: stdout: "service/agnhost-replica created\n" +Aug 24 12:21:38.564: INFO: apiVersion: v1 +kind: Service +metadata: + name: agnhost-primary + labels: + app: agnhost + role: primary + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: agnhost + role: primary + tier: backend + +Aug 24 12:21:38.564: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' +Aug 24 12:21:39.934: INFO: stderr: "" +Aug 24 12:21:39.934: INFO: stdout: "service/agnhost-primary created\n" +Aug 24 12:21:39.934: INFO: apiVersion: v1 +kind: Service +metadata: + name: frontend + labels: + app: guestbook + tier: frontend +spec: + # if your cluster supports it, uncomment the following to automatically create + # an external load-balanced IP for the frontend service. 
+ # type: LoadBalancer + ports: + - port: 80 + selector: + app: guestbook + tier: frontend + +Aug 24 12:21:39.935: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' +Aug 24 12:21:40.575: INFO: stderr: "" +Aug 24 12:21:40.575: INFO: stdout: "service/frontend created\n" +Aug 24 12:21:40.575: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend +spec: + replicas: 3 + selector: + matchLabels: + app: guestbook + tier: frontend + template: + metadata: + labels: + app: guestbook + tier: frontend + spec: + containers: + - name: guestbook-frontend + image: registry.k8s.io/e2e-test-images/agnhost:2.43 + args: [ "guestbook", "--backend-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 80 + +Aug 24 12:21:40.576: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' +Aug 24 12:21:41.045: INFO: stderr: "" +Aug 24 12:21:41.045: INFO: stdout: "deployment.apps/frontend created\n" +Aug 24 12:21:41.045: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: agnhost-primary +spec: + replicas: 1 + selector: + matchLabels: + app: agnhost + role: primary + tier: backend + template: + metadata: + labels: + app: agnhost + role: primary + tier: backend + spec: + containers: + - name: primary + image: registry.k8s.io/e2e-test-images/agnhost:2.43 + args: [ "guestbook", "--http-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 + +Aug 24 12:21:41.046: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' +Aug 24 12:21:41.585: INFO: stderr: "" +Aug 24 12:21:41.585: INFO: stdout: "deployment.apps/agnhost-primary created\n" +Aug 24 12:21:41.586: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: agnhost-replica +spec: + replicas: 2 + selector: + matchLabels: + app: agnhost + role: replica + tier: backend + template: + metadata: + labels: + app: agnhost + role: replica + tier: backend + spec: + containers: + - name: replica + image: registry.k8s.io/e2e-test-images/agnhost:2.43 + args: [ "guestbook", "--replicaof", "agnhost-primary", "--http-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Namespaces [Serial] - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:15:15.073 - Jul 29 16:15:15.073: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename namespaces 07/29/23 16:15:15.077 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:15.124 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:15.131 - [BeforeEach] [sig-api-machinery] Namespaces [Serial] - test/e2e/framework/metrics/init/init.go:31 - [It] should apply an update to a Namespace [Conformance] - test/e2e/apimachinery/namespace.go:366 - STEP: Updating Namespace "namespaces-4016" 07/29/23 16:15:15.139 - Jul 29 16:15:15.157: INFO: Namespace "namespaces-4016" now has labels, map[string]string{"e2e-framework":"namespaces", "e2e-run":"d0d188fc-d094-4f2b-8739-c618e26462b8", "kubernetes.io/metadata.name":"namespaces-4016", "namespaces-4016":"updated", "pod-security.kubernetes.io/enforce":"baseline"} - [AfterEach] [sig-api-machinery] Namespaces [Serial] - 
test/e2e/framework/node/init/init.go:32 - Jul 29 16:15:15.157: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] - tear down framework | framework.go:193 - STEP: Destroying namespace "namespaces-4016" for this suite. 07/29/23 16:15:15.171 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-node] NoExecuteTaintManager Single Pod [Serial] - removing taint cancels eviction [Disruptive] [Conformance] - test/e2e/node/taints.go:293 -[BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:15:15.187 -Jul 29 16:15:15.187: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename taint-single-pod 07/29/23 16:15:15.189 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:15.216 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:15.222 -[BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] - test/e2e/node/taints.go:170 -Jul 29 16:15:15.227: INFO: Waiting up to 1m0s for all nodes to be ready -Jul 29 16:16:15.277: INFO: Waiting for terminating namespaces to be deleted... -[It] removing taint cancels eviction [Disruptive] [Conformance] - test/e2e/node/taints.go:293 -Jul 29 16:16:15.285: INFO: Starting informer... -STEP: Starting pod... 07/29/23 16:16:15.285 -Jul 29 16:16:15.330: INFO: Pod is running on wetuj3nuajog-3. Tainting Node -STEP: Trying to apply a taint on the Node 07/29/23 16:16:15.331 -STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 07/29/23 16:16:15.37 -STEP: Waiting short time to make sure Pod is queued for deletion 07/29/23 16:16:15.38 -Jul 29 16:16:15.380: INFO: Pod wasn't evicted. Proceeding -Jul 29 16:16:15.380: INFO: Removing taint from Node -STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 07/29/23 16:16:15.407 -STEP: Waiting some time to make sure that toleration time passed. 07/29/23 16:16:15.413 -Jul 29 16:17:30.414: INFO: Pod wasn't evicted. Test successful -[AfterEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] +Aug 24 12:21:41.593: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' +Aug 24 12:21:42.460: INFO: stderr: "" +Aug 24 12:21:42.460: INFO: stdout: "deployment.apps/agnhost-replica created\n" +STEP: validating guestbook app 08/24/23 12:21:42.46 +Aug 24 12:21:42.460: INFO: Waiting for all frontend pods to be Running. +Aug 24 12:21:47.526: INFO: Waiting for frontend to serve content. +Aug 24 12:21:47.551: INFO: Trying to add a new entry to the guestbook. +Aug 24 12:21:47.573: INFO: Verifying that added entry can be retrieved. 
+STEP: using delete to clean up resources 08/24/23 12:21:47.597 +Aug 24 12:21:47.598: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' +Aug 24 12:21:47.755: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Aug 24 12:21:47.755: INFO: stdout: "service \"agnhost-replica\" force deleted\n" +STEP: using delete to clean up resources 08/24/23 12:21:47.755 +Aug 24 12:21:47.755: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' +Aug 24 12:21:47.968: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Aug 24 12:21:47.968: INFO: stdout: "service \"agnhost-primary\" force deleted\n" +STEP: using delete to clean up resources 08/24/23 12:21:47.968 +Aug 24 12:21:47.970: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' +Aug 24 12:21:48.185: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Aug 24 12:21:48.185: INFO: stdout: "service \"frontend\" force deleted\n" +STEP: using delete to clean up resources 08/24/23 12:21:48.186 +Aug 24 12:21:48.186: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' +Aug 24 12:21:48.329: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Aug 24 12:21:48.329: INFO: stdout: "deployment.apps \"frontend\" force deleted\n" +STEP: using delete to clean up resources 08/24/23 12:21:48.329 +Aug 24 12:21:48.330: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' +Aug 24 12:21:48.549: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Aug 24 12:21:48.549: INFO: stdout: "deployment.apps \"agnhost-primary\" force deleted\n" +STEP: using delete to clean up resources 08/24/23 12:21:48.549 +Aug 24 12:21:48.556: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' +Aug 24 12:21:48.782: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Aug 24 12:21:48.782: INFO: stdout: "deployment.apps \"agnhost-replica\" force deleted\n" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 16:17:30.415: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] +Aug 24 12:21:48.782: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "taint-single-pod-5093" for this suite. 07/29/23 16:17:30.43 +STEP: Destroying namespace "kubectl-820" for this suite. 08/24/23 12:21:48.792 ------------------------------ -• [SLOW TEST] [135.257 seconds] -[sig-node] NoExecuteTaintManager Single Pod [Serial] -test/e2e/node/framework.go:23 - removing taint cancels eviction [Disruptive] [Conformance] - test/e2e/node/taints.go:293 +• [SLOW TEST] [10.918 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Guestbook application + test/e2e/kubectl/kubectl.go:369 + should create and stop a working application [Conformance] + test/e2e/kubectl/kubectl.go:394 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:15:15.187 - Jul 29 16:15:15.187: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename taint-single-pod 07/29/23 16:15:15.189 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:15:15.216 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:15:15.222 - [BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] + STEP: Creating a kubernetes client 08/24/23 12:21:37.888 + Aug 24 12:21:37.888: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:21:37.89 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:37.917 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:37.922 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] - test/e2e/node/taints.go:170 - Jul 29 16:15:15.227: INFO: Waiting up to 1m0s for all nodes to be ready - Jul 29 16:16:15.277: INFO: Waiting for terminating namespaces to be deleted... - [It] removing taint cancels eviction [Disruptive] [Conformance] - test/e2e/node/taints.go:293 - Jul 29 16:16:15.285: INFO: Starting informer... - STEP: Starting pod... 07/29/23 16:16:15.285 - Jul 29 16:16:15.330: INFO: Pod is running on wetuj3nuajog-3. Tainting Node - STEP: Trying to apply a taint on the Node 07/29/23 16:16:15.331 - STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 07/29/23 16:16:15.37 - STEP: Waiting short time to make sure Pod is queued for deletion 07/29/23 16:16:15.38 - Jul 29 16:16:15.380: INFO: Pod wasn't evicted. 
Proceeding - Jul 29 16:16:15.380: INFO: Removing taint from Node - STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 07/29/23 16:16:15.407 - STEP: Waiting some time to make sure that toleration time passed. 07/29/23 16:16:15.413 - Jul 29 16:17:30.414: INFO: Pod wasn't evicted. Test successful - [AfterEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] - test/e2e/framework/node/init/init.go:32 - Jul 29 16:17:30.415: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Single Pod [Serial] - tear down framework | framework.go:193 - STEP: Destroying namespace "taint-single-pod-5093" for this suite. 07/29/23 16:17:30.43 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSS ------------------------------- -[sig-storage] Projected downwardAPI - should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:68 -[BeforeEach] [sig-storage] Projected downwardAPI - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:17:30.448 -Jul 29 16:17:30.448: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:17:30.452 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:30.482 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:30.488 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:68 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:17:30.493 -Jul 29 16:17:30.508: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e" in namespace "projected-7440" to be "Succeeded or Failed" -Jul 29 16:17:30.517: INFO: Pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e": Phase="Pending", Reason="", readiness=false. Elapsed: 8.109971ms -Jul 29 16:17:32.528: INFO: Pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01967903s -Jul 29 16:17:34.525: INFO: Pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.016473931s -STEP: Saw pod success 07/29/23 16:17:34.525 -Jul 29 16:17:34.526: INFO: Pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e" satisfied condition "Succeeded or Failed" -Jul 29 16:17:34.534: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e container client-container: -STEP: delete the pod 07/29/23 16:17:34.563 -Jul 29 16:17:34.584: INFO: Waiting for pod downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e to disappear -Jul 29 16:17:34.593: INFO: Pod downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e no longer exists -[AfterEach] [sig-storage] Projected downwardAPI - test/e2e/framework/node/init/init.go:32 -Jul 29 16:17:34.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI - tear down framework | framework.go:193 -STEP: Destroying namespace "projected-7440" for this suite. 07/29/23 16:17:34.604 ------------------------------- -• [4.171 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:68 + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [It] should create and stop a working application [Conformance] + test/e2e/kubectl/kubectl.go:394 + STEP: creating all guestbook components 08/24/23 12:21:37.928 + Aug 24 12:21:37.928: INFO: apiVersion: v1 + kind: Service + metadata: + name: agnhost-replica + labels: + app: agnhost + role: replica + tier: backend + spec: + ports: + - port: 6379 + selector: + app: agnhost + role: replica + tier: backend - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:17:30.448 - Jul 29 16:17:30.448: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:17:30.452 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:30.482 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:30.488 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:68 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:17:30.493 - Jul 29 16:17:30.508: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e" in namespace "projected-7440" to be "Succeeded or Failed" - Jul 29 16:17:30.517: INFO: Pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e": Phase="Pending", Reason="", readiness=false. Elapsed: 8.109971ms - Jul 29 16:17:32.528: INFO: Pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01967903s - Jul 29 16:17:34.525: INFO: Pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.016473931s - STEP: Saw pod success 07/29/23 16:17:34.525 - Jul 29 16:17:34.526: INFO: Pod "downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e" satisfied condition "Succeeded or Failed" - Jul 29 16:17:34.534: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e container client-container: - STEP: delete the pod 07/29/23 16:17:34.563 - Jul 29 16:17:34.584: INFO: Waiting for pod downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e to disappear - Jul 29 16:17:34.593: INFO: Pod downwardapi-volume-c4c6d286-4b0a-48e4-ba44-944c02d0835e no longer exists - [AfterEach] [sig-storage] Projected downwardAPI + Aug 24 12:21:37.929: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' + Aug 24 12:21:38.564: INFO: stderr: "" + Aug 24 12:21:38.564: INFO: stdout: "service/agnhost-replica created\n" + Aug 24 12:21:38.564: INFO: apiVersion: v1 + kind: Service + metadata: + name: agnhost-primary + labels: + app: agnhost + role: primary + tier: backend + spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: agnhost + role: primary + tier: backend + + Aug 24 12:21:38.564: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' + Aug 24 12:21:39.934: INFO: stderr: "" + Aug 24 12:21:39.934: INFO: stdout: "service/agnhost-primary created\n" + Aug 24 12:21:39.934: INFO: apiVersion: v1 + kind: Service + metadata: + name: frontend + labels: + app: guestbook + tier: frontend + spec: + # if your cluster supports it, uncomment the following to automatically create + # an external load-balanced IP for the frontend service. + # type: LoadBalancer + ports: + - port: 80 + selector: + app: guestbook + tier: frontend + + Aug 24 12:21:39.935: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' + Aug 24 12:21:40.575: INFO: stderr: "" + Aug 24 12:21:40.575: INFO: stdout: "service/frontend created\n" + Aug 24 12:21:40.575: INFO: apiVersion: apps/v1 + kind: Deployment + metadata: + name: frontend + spec: + replicas: 3 + selector: + matchLabels: + app: guestbook + tier: frontend + template: + metadata: + labels: + app: guestbook + tier: frontend + spec: + containers: + - name: guestbook-frontend + image: registry.k8s.io/e2e-test-images/agnhost:2.43 + args: [ "guestbook", "--backend-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 80 + + Aug 24 12:21:40.576: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' + Aug 24 12:21:41.045: INFO: stderr: "" + Aug 24 12:21:41.045: INFO: stdout: "deployment.apps/frontend created\n" + Aug 24 12:21:41.045: INFO: apiVersion: apps/v1 + kind: Deployment + metadata: + name: agnhost-primary + spec: + replicas: 1 + selector: + matchLabels: + app: agnhost + role: primary + tier: backend + template: + metadata: + labels: + app: agnhost + role: primary + tier: backend + spec: + containers: + - name: primary + image: registry.k8s.io/e2e-test-images/agnhost:2.43 + args: [ "guestbook", "--http-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 + + Aug 24 12:21:41.046: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' + Aug 24 12:21:41.585: INFO: stderr: "" + Aug 24 12:21:41.585: INFO: stdout: 
"deployment.apps/agnhost-primary created\n" + Aug 24 12:21:41.586: INFO: apiVersion: apps/v1 + kind: Deployment + metadata: + name: agnhost-replica + spec: + replicas: 2 + selector: + matchLabels: + app: agnhost + role: replica + tier: backend + template: + metadata: + labels: + app: agnhost + role: replica + tier: backend + spec: + containers: + - name: replica + image: registry.k8s.io/e2e-test-images/agnhost:2.43 + args: [ "guestbook", "--replicaof", "agnhost-primary", "--http-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 + + Aug 24 12:21:41.593: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 create -f -' + Aug 24 12:21:42.460: INFO: stderr: "" + Aug 24 12:21:42.460: INFO: stdout: "deployment.apps/agnhost-replica created\n" + STEP: validating guestbook app 08/24/23 12:21:42.46 + Aug 24 12:21:42.460: INFO: Waiting for all frontend pods to be Running. + Aug 24 12:21:47.526: INFO: Waiting for frontend to serve content. + Aug 24 12:21:47.551: INFO: Trying to add a new entry to the guestbook. + Aug 24 12:21:47.573: INFO: Verifying that added entry can be retrieved. + STEP: using delete to clean up resources 08/24/23 12:21:47.597 + Aug 24 12:21:47.598: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' + Aug 24 12:21:47.755: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" + Aug 24 12:21:47.755: INFO: stdout: "service \"agnhost-replica\" force deleted\n" + STEP: using delete to clean up resources 08/24/23 12:21:47.755 + Aug 24 12:21:47.755: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' + Aug 24 12:21:47.968: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" + Aug 24 12:21:47.968: INFO: stdout: "service \"agnhost-primary\" force deleted\n" + STEP: using delete to clean up resources 08/24/23 12:21:47.968 + Aug 24 12:21:47.970: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' + Aug 24 12:21:48.185: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" + Aug 24 12:21:48.185: INFO: stdout: "service \"frontend\" force deleted\n" + STEP: using delete to clean up resources 08/24/23 12:21:48.186 + Aug 24 12:21:48.186: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' + Aug 24 12:21:48.329: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" + Aug 24 12:21:48.329: INFO: stdout: "deployment.apps \"frontend\" force deleted\n" + STEP: using delete to clean up resources 08/24/23 12:21:48.329 + Aug 24 12:21:48.330: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' + Aug 24 12:21:48.549: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" + Aug 24 12:21:48.549: INFO: stdout: "deployment.apps \"agnhost-primary\" force deleted\n" + STEP: using delete to clean up resources 08/24/23 12:21:48.549 + Aug 24 12:21:48.556: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-820 delete --grace-period=0 --force -f -' + Aug 24 12:21:48.782: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" + Aug 24 12:21:48.782: INFO: stdout: "deployment.apps \"agnhost-replica\" force deleted\n" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 16:17:34.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 12:21:48.782: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "projected-7440" for this suite. 07/29/23 16:17:34.604 + STEP: Destroying namespace "kubectl-820" for this suite. 
08/24/23 12:21:48.792 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +S ------------------------------ -[sig-cli] Kubectl client Kubectl logs - should be able to retrieve and filter logs [Conformance] - test/e2e/kubectl/kubectl.go:1592 -[BeforeEach] [sig-cli] Kubectl client +[sig-node] Downward API + should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:217 +[BeforeEach] [sig-node] Downward API set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:17:34.621 -Jul 29 16:17:34.622: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:17:34.627 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:34.658 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:34.665 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 12:21:48.809 +Aug 24 12:21:48.810: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:21:48.817 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:48.874 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:48.882 +[BeforeEach] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[BeforeEach] Kubectl logs - test/e2e/kubectl/kubectl.go:1572 -STEP: creating an pod 07/29/23 16:17:34.671 -Jul 29 16:17:34.671: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 run logs-generator --image=registry.k8s.io/e2e-test-images/agnhost:2.43 --restart=Never --pod-running-timeout=2m0s -- logs-generator --log-lines-total 100 --run-duration 20s' -Jul 29 16:17:34.873: INFO: stderr: "" -Jul 29 16:17:34.873: INFO: stdout: "pod/logs-generator created\n" -[It] should be able to retrieve and filter logs [Conformance] - test/e2e/kubectl/kubectl.go:1592 -STEP: Waiting for log generator to start. 07/29/23 16:17:34.874 -Jul 29 16:17:34.874: INFO: Waiting up to 5m0s for 1 pods to be running and ready, or succeeded: [logs-generator] -Jul 29 16:17:34.874: INFO: Waiting up to 5m0s for pod "logs-generator" in namespace "kubectl-2677" to be "running and ready, or succeeded" -Jul 29 16:17:34.890: INFO: Pod "logs-generator": Phase="Pending", Reason="", readiness=false. Elapsed: 15.446343ms -Jul 29 16:17:34.890: INFO: Error evaluating pod condition running and ready, or succeeded: want pod 'logs-generator' on 'wetuj3nuajog-3' to be 'Running' but was 'Pending' -Jul 29 16:17:36.898: INFO: Pod "logs-generator": Phase="Running", Reason="", readiness=true. Elapsed: 2.024020178s -Jul 29 16:17:36.898: INFO: Pod "logs-generator" satisfied condition "running and ready, or succeeded" -Jul 29 16:17:36.899: INFO: Wanted all 1 pods to be running and ready, or succeeded. Result: true. 
Pods: [logs-generator] -STEP: checking for a matching strings 07/29/23 16:17:36.899 -Jul 29 16:17:36.899: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator' -Jul 29 16:17:37.126: INFO: stderr: "" -Jul 29 16:17:37.126: INFO: stdout: "I0729 16:17:35.933391 1 logs_generator.go:76] 0 GET /api/v1/namespaces/ns/pods/glz 393\nI0729 16:17:36.133534 1 logs_generator.go:76] 1 GET /api/v1/namespaces/ns/pods/7bs 311\nI0729 16:17:36.334008 1 logs_generator.go:76] 2 PUT /api/v1/namespaces/default/pods/tkq7 415\nI0729 16:17:36.533214 1 logs_generator.go:76] 3 PUT /api/v1/namespaces/kube-system/pods/v2x 299\nI0729 16:17:36.733707 1 logs_generator.go:76] 4 GET /api/v1/namespaces/default/pods/pq5j 593\nI0729 16:17:36.933384 1 logs_generator.go:76] 5 POST /api/v1/namespaces/default/pods/m7d 498\n" -STEP: limiting log lines 07/29/23 16:17:37.126 -Jul 29 16:17:37.126: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator --tail=1' -Jul 29 16:17:37.309: INFO: stderr: "" -Jul 29 16:17:37.309: INFO: stdout: "I0729 16:17:37.134020 1 logs_generator.go:76] 6 GET /api/v1/namespaces/ns/pods/kl2 269\n" -Jul 29 16:17:37.309: INFO: got output "I0729 16:17:37.134020 1 logs_generator.go:76] 6 GET /api/v1/namespaces/ns/pods/kl2 269\n" -STEP: limiting log bytes 07/29/23 16:17:37.309 -Jul 29 16:17:37.310: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator --limit-bytes=1' -Jul 29 16:17:37.509: INFO: stderr: "" -Jul 29 16:17:37.509: INFO: stdout: "I" -Jul 29 16:17:37.509: INFO: got output "I" -STEP: exposing timestamps 07/29/23 16:17:37.509 -Jul 29 16:17:37.509: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator --tail=1 --timestamps' -Jul 29 16:17:37.650: INFO: stderr: "" -Jul 29 16:17:37.650: INFO: stdout: "2023-07-29T16:17:37.533520286Z I0729 16:17:37.533428 1 logs_generator.go:76] 8 PUT /api/v1/namespaces/ns/pods/g6b 221\n" -Jul 29 16:17:37.650: INFO: got output "2023-07-29T16:17:37.533520286Z I0729 16:17:37.533428 1 logs_generator.go:76] 8 PUT /api/v1/namespaces/ns/pods/g6b 221\n" -STEP: restricting to a time range 07/29/23 16:17:37.65 -Jul 29 16:17:40.152: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator --since=1s' -Jul 29 16:17:40.319: INFO: stderr: "" -Jul 29 16:17:40.319: INFO: stdout: "I0729 16:17:39.334139 1 logs_generator.go:76] 17 POST /api/v1/namespaces/kube-system/pods/jxqj 220\nI0729 16:17:39.533633 1 logs_generator.go:76] 18 POST /api/v1/namespaces/ns/pods/ssnp 280\nI0729 16:17:39.734120 1 logs_generator.go:76] 19 PUT /api/v1/namespaces/default/pods/gpk 470\nI0729 16:17:39.933644 1 logs_generator.go:76] 20 PUT /api/v1/namespaces/ns/pods/pt2 583\nI0729 16:17:40.133126 1 logs_generator.go:76] 21 GET /api/v1/namespaces/default/pods/mlg 205\n" -Jul 29 16:17:40.319: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator --since=24h' -Jul 29 16:17:40.464: INFO: stderr: "" -Jul 29 16:17:40.464: INFO: stdout: "I0729 16:17:35.933391 1 logs_generator.go:76] 0 GET /api/v1/namespaces/ns/pods/glz 393\nI0729 16:17:36.133534 1 logs_generator.go:76] 1 GET /api/v1/namespaces/ns/pods/7bs 311\nI0729 16:17:36.334008 1 
logs_generator.go:76] 2 PUT /api/v1/namespaces/default/pods/tkq7 415\nI0729 16:17:36.533214 1 logs_generator.go:76] 3 PUT /api/v1/namespaces/kube-system/pods/v2x 299\nI0729 16:17:36.733707 1 logs_generator.go:76] 4 GET /api/v1/namespaces/default/pods/pq5j 593\nI0729 16:17:36.933384 1 logs_generator.go:76] 5 POST /api/v1/namespaces/default/pods/m7d 498\nI0729 16:17:37.134020 1 logs_generator.go:76] 6 GET /api/v1/namespaces/ns/pods/kl2 269\nI0729 16:17:37.333750 1 logs_generator.go:76] 7 POST /api/v1/namespaces/ns/pods/htb 333\nI0729 16:17:37.533428 1 logs_generator.go:76] 8 PUT /api/v1/namespaces/ns/pods/g6b 221\nI0729 16:17:37.734054 1 logs_generator.go:76] 9 POST /api/v1/namespaces/default/pods/zbm 522\nI0729 16:17:37.933634 1 logs_generator.go:76] 10 PUT /api/v1/namespaces/kube-system/pods/cxj 575\nI0729 16:17:38.134141 1 logs_generator.go:76] 11 PUT /api/v1/namespaces/kube-system/pods/xtw7 337\nI0729 16:17:38.333640 1 logs_generator.go:76] 12 POST /api/v1/namespaces/ns/pods/j5k7 412\nI0729 16:17:38.534139 1 logs_generator.go:76] 13 GET /api/v1/namespaces/ns/pods/4bbf 551\nI0729 16:17:38.733611 1 logs_generator.go:76] 14 POST /api/v1/namespaces/kube-system/pods/mdxv 582\nI0729 16:17:38.934094 1 logs_generator.go:76] 15 PUT /api/v1/namespaces/default/pods/swc 558\nI0729 16:17:39.133617 1 logs_generator.go:76] 16 POST /api/v1/namespaces/default/pods/rhf 268\nI0729 16:17:39.334139 1 logs_generator.go:76] 17 POST /api/v1/namespaces/kube-system/pods/jxqj 220\nI0729 16:17:39.533633 1 logs_generator.go:76] 18 POST /api/v1/namespaces/ns/pods/ssnp 280\nI0729 16:17:39.734120 1 logs_generator.go:76] 19 PUT /api/v1/namespaces/default/pods/gpk 470\nI0729 16:17:39.933644 1 logs_generator.go:76] 20 PUT /api/v1/namespaces/ns/pods/pt2 583\nI0729 16:17:40.133126 1 logs_generator.go:76] 21 GET /api/v1/namespaces/default/pods/mlg 205\nI0729 16:17:40.333124 1 logs_generator.go:76] 22 PUT /api/v1/namespaces/kube-system/pods/gfv 466\n" -[AfterEach] Kubectl logs - test/e2e/kubectl/kubectl.go:1577 -Jul 29 16:17:40.464: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 delete pod logs-generator' -Jul 29 16:17:41.037: INFO: stderr: "" -Jul 29 16:17:41.038: INFO: stdout: "pod \"logs-generator\" deleted\n" -[AfterEach] [sig-cli] Kubectl client +[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:217 +STEP: Creating a pod to test downward api env vars 08/24/23 12:21:48.897 +Aug 24 12:21:48.916: INFO: Waiting up to 5m0s for pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3" in namespace "downward-api-9750" to be "Succeeded or Failed" +Aug 24 12:21:48.924: INFO: Pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3": Phase="Pending", Reason="", readiness=false. Elapsed: 7.646101ms +Aug 24 12:21:50.939: INFO: Pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022422239s +Aug 24 12:21:52.933: INFO: Pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.015818889s +STEP: Saw pod success 08/24/23 12:21:52.933 +Aug 24 12:21:52.933: INFO: Pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3" satisfied condition "Succeeded or Failed" +Aug 24 12:21:52.939: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3 container dapi-container: +STEP: delete the pod 08/24/23 12:21:52.955 +Aug 24 12:21:52.974: INFO: Waiting for pod downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3 to disappear +Aug 24 12:21:52.981: INFO: Pod downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3 no longer exists +[AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 -Jul 29 16:17:41.038: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 12:21:52.982: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-2677" for this suite. 07/29/23 16:17:41.046 +STEP: Destroying namespace "downward-api-9750" for this suite. 08/24/23 12:21:52.994 ------------------------------ -• [SLOW TEST] [6.437 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Kubectl logs - test/e2e/kubectl/kubectl.go:1569 - should be able to retrieve and filter logs [Conformance] - test/e2e/kubectl/kubectl.go:1592 +• [4.206 seconds] +[sig-node] Downward API +test/e2e/common/node/framework.go:23 + should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:217 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-node] Downward API set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:17:34.621 - Jul 29 16:17:34.622: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 16:17:34.627 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:34.658 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:34.665 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 12:21:48.809 + Aug 24 12:21:48.810: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 12:21:48.817 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:48.874 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:48.882 + [BeforeEach] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [BeforeEach] Kubectl logs - test/e2e/kubectl/kubectl.go:1572 - STEP: creating an pod 07/29/23 16:17:34.671 - Jul 29 16:17:34.671: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 run logs-generator --image=registry.k8s.io/e2e-test-images/agnhost:2.43 --restart=Never --pod-running-timeout=2m0s -- logs-generator --log-lines-total 100 --run-duration 20s' - Jul 29 16:17:34.873: INFO: stderr: "" - Jul 29 16:17:34.873: INFO: stdout: "pod/logs-generator 
created\n" - [It] should be able to retrieve and filter logs [Conformance] - test/e2e/kubectl/kubectl.go:1592 - STEP: Waiting for log generator to start. 07/29/23 16:17:34.874 - Jul 29 16:17:34.874: INFO: Waiting up to 5m0s for 1 pods to be running and ready, or succeeded: [logs-generator] - Jul 29 16:17:34.874: INFO: Waiting up to 5m0s for pod "logs-generator" in namespace "kubectl-2677" to be "running and ready, or succeeded" - Jul 29 16:17:34.890: INFO: Pod "logs-generator": Phase="Pending", Reason="", readiness=false. Elapsed: 15.446343ms - Jul 29 16:17:34.890: INFO: Error evaluating pod condition running and ready, or succeeded: want pod 'logs-generator' on 'wetuj3nuajog-3' to be 'Running' but was 'Pending' - Jul 29 16:17:36.898: INFO: Pod "logs-generator": Phase="Running", Reason="", readiness=true. Elapsed: 2.024020178s - Jul 29 16:17:36.898: INFO: Pod "logs-generator" satisfied condition "running and ready, or succeeded" - Jul 29 16:17:36.899: INFO: Wanted all 1 pods to be running and ready, or succeeded. Result: true. Pods: [logs-generator] - STEP: checking for a matching strings 07/29/23 16:17:36.899 - Jul 29 16:17:36.899: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator' - Jul 29 16:17:37.126: INFO: stderr: "" - Jul 29 16:17:37.126: INFO: stdout: "I0729 16:17:35.933391 1 logs_generator.go:76] 0 GET /api/v1/namespaces/ns/pods/glz 393\nI0729 16:17:36.133534 1 logs_generator.go:76] 1 GET /api/v1/namespaces/ns/pods/7bs 311\nI0729 16:17:36.334008 1 logs_generator.go:76] 2 PUT /api/v1/namespaces/default/pods/tkq7 415\nI0729 16:17:36.533214 1 logs_generator.go:76] 3 PUT /api/v1/namespaces/kube-system/pods/v2x 299\nI0729 16:17:36.733707 1 logs_generator.go:76] 4 GET /api/v1/namespaces/default/pods/pq5j 593\nI0729 16:17:36.933384 1 logs_generator.go:76] 5 POST /api/v1/namespaces/default/pods/m7d 498\n" - STEP: limiting log lines 07/29/23 16:17:37.126 - Jul 29 16:17:37.126: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator --tail=1' - Jul 29 16:17:37.309: INFO: stderr: "" - Jul 29 16:17:37.309: INFO: stdout: "I0729 16:17:37.134020 1 logs_generator.go:76] 6 GET /api/v1/namespaces/ns/pods/kl2 269\n" - Jul 29 16:17:37.309: INFO: got output "I0729 16:17:37.134020 1 logs_generator.go:76] 6 GET /api/v1/namespaces/ns/pods/kl2 269\n" - STEP: limiting log bytes 07/29/23 16:17:37.309 - Jul 29 16:17:37.310: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator --limit-bytes=1' - Jul 29 16:17:37.509: INFO: stderr: "" - Jul 29 16:17:37.509: INFO: stdout: "I" - Jul 29 16:17:37.509: INFO: got output "I" - STEP: exposing timestamps 07/29/23 16:17:37.509 - Jul 29 16:17:37.509: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator --tail=1 --timestamps' - Jul 29 16:17:37.650: INFO: stderr: "" - Jul 29 16:17:37.650: INFO: stdout: "2023-07-29T16:17:37.533520286Z I0729 16:17:37.533428 1 logs_generator.go:76] 8 PUT /api/v1/namespaces/ns/pods/g6b 221\n" - Jul 29 16:17:37.650: INFO: got output "2023-07-29T16:17:37.533520286Z I0729 16:17:37.533428 1 logs_generator.go:76] 8 PUT /api/v1/namespaces/ns/pods/g6b 221\n" - STEP: restricting to a time range 07/29/23 16:17:37.65 - Jul 29 16:17:40.152: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 
--namespace=kubectl-2677 logs logs-generator logs-generator --since=1s' - Jul 29 16:17:40.319: INFO: stderr: "" - Jul 29 16:17:40.319: INFO: stdout: "I0729 16:17:39.334139 1 logs_generator.go:76] 17 POST /api/v1/namespaces/kube-system/pods/jxqj 220\nI0729 16:17:39.533633 1 logs_generator.go:76] 18 POST /api/v1/namespaces/ns/pods/ssnp 280\nI0729 16:17:39.734120 1 logs_generator.go:76] 19 PUT /api/v1/namespaces/default/pods/gpk 470\nI0729 16:17:39.933644 1 logs_generator.go:76] 20 PUT /api/v1/namespaces/ns/pods/pt2 583\nI0729 16:17:40.133126 1 logs_generator.go:76] 21 GET /api/v1/namespaces/default/pods/mlg 205\n" - Jul 29 16:17:40.319: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 logs logs-generator logs-generator --since=24h' - Jul 29 16:17:40.464: INFO: stderr: "" - Jul 29 16:17:40.464: INFO: stdout: "I0729 16:17:35.933391 1 logs_generator.go:76] 0 GET /api/v1/namespaces/ns/pods/glz 393\nI0729 16:17:36.133534 1 logs_generator.go:76] 1 GET /api/v1/namespaces/ns/pods/7bs 311\nI0729 16:17:36.334008 1 logs_generator.go:76] 2 PUT /api/v1/namespaces/default/pods/tkq7 415\nI0729 16:17:36.533214 1 logs_generator.go:76] 3 PUT /api/v1/namespaces/kube-system/pods/v2x 299\nI0729 16:17:36.733707 1 logs_generator.go:76] 4 GET /api/v1/namespaces/default/pods/pq5j 593\nI0729 16:17:36.933384 1 logs_generator.go:76] 5 POST /api/v1/namespaces/default/pods/m7d 498\nI0729 16:17:37.134020 1 logs_generator.go:76] 6 GET /api/v1/namespaces/ns/pods/kl2 269\nI0729 16:17:37.333750 1 logs_generator.go:76] 7 POST /api/v1/namespaces/ns/pods/htb 333\nI0729 16:17:37.533428 1 logs_generator.go:76] 8 PUT /api/v1/namespaces/ns/pods/g6b 221\nI0729 16:17:37.734054 1 logs_generator.go:76] 9 POST /api/v1/namespaces/default/pods/zbm 522\nI0729 16:17:37.933634 1 logs_generator.go:76] 10 PUT /api/v1/namespaces/kube-system/pods/cxj 575\nI0729 16:17:38.134141 1 logs_generator.go:76] 11 PUT /api/v1/namespaces/kube-system/pods/xtw7 337\nI0729 16:17:38.333640 1 logs_generator.go:76] 12 POST /api/v1/namespaces/ns/pods/j5k7 412\nI0729 16:17:38.534139 1 logs_generator.go:76] 13 GET /api/v1/namespaces/ns/pods/4bbf 551\nI0729 16:17:38.733611 1 logs_generator.go:76] 14 POST /api/v1/namespaces/kube-system/pods/mdxv 582\nI0729 16:17:38.934094 1 logs_generator.go:76] 15 PUT /api/v1/namespaces/default/pods/swc 558\nI0729 16:17:39.133617 1 logs_generator.go:76] 16 POST /api/v1/namespaces/default/pods/rhf 268\nI0729 16:17:39.334139 1 logs_generator.go:76] 17 POST /api/v1/namespaces/kube-system/pods/jxqj 220\nI0729 16:17:39.533633 1 logs_generator.go:76] 18 POST /api/v1/namespaces/ns/pods/ssnp 280\nI0729 16:17:39.734120 1 logs_generator.go:76] 19 PUT /api/v1/namespaces/default/pods/gpk 470\nI0729 16:17:39.933644 1 logs_generator.go:76] 20 PUT /api/v1/namespaces/ns/pods/pt2 583\nI0729 16:17:40.133126 1 logs_generator.go:76] 21 GET /api/v1/namespaces/default/pods/mlg 205\nI0729 16:17:40.333124 1 logs_generator.go:76] 22 PUT /api/v1/namespaces/kube-system/pods/gfv 466\n" - [AfterEach] Kubectl logs - test/e2e/kubectl/kubectl.go:1577 - Jul 29 16:17:40.464: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-2677 delete pod logs-generator' - Jul 29 16:17:41.037: INFO: stderr: "" - Jul 29 16:17:41.038: INFO: stdout: "pod \"logs-generator\" deleted\n" - [AfterEach] [sig-cli] Kubectl client + [It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:217 + STEP: Creating a pod 
to test downward api env vars 08/24/23 12:21:48.897 + Aug 24 12:21:48.916: INFO: Waiting up to 5m0s for pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3" in namespace "downward-api-9750" to be "Succeeded or Failed" + Aug 24 12:21:48.924: INFO: Pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3": Phase="Pending", Reason="", readiness=false. Elapsed: 7.646101ms + Aug 24 12:21:50.939: INFO: Pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022422239s + Aug 24 12:21:52.933: INFO: Pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015818889s + STEP: Saw pod success 08/24/23 12:21:52.933 + Aug 24 12:21:52.933: INFO: Pod "downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3" satisfied condition "Succeeded or Failed" + Aug 24 12:21:52.939: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3 container dapi-container: + STEP: delete the pod 08/24/23 12:21:52.955 + Aug 24 12:21:52.974: INFO: Waiting for pod downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3 to disappear + Aug 24 12:21:52.981: INFO: Pod downward-api-1995786d-aa1d-4932-bb72-9a57b96dd6e3 no longer exists + [AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 - Jul 29 16:17:41.038: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 12:21:52.982: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-2677" for this suite. 07/29/23 16:17:41.046 + STEP: Destroying namespace "downward-api-9750" for this suite. 
08/24/23 12:21:52.994 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS +SSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:127 -[BeforeEach] [sig-storage] EmptyDir volumes +[sig-auth] ServiceAccounts + should mount an API token into pods [Conformance] + test/e2e/auth/service_accounts.go:78 +[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:17:41.066 -Jul 29 16:17:41.066: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 16:17:41.068 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:41.101 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:41.106 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 12:21:53.017 +Aug 24 12:21:53.018: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename svcaccounts 08/24/23 12:21:53.02 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:53.053 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:53.058 +[BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 -[It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:127 -STEP: Creating a pod to test emptydir 0644 on tmpfs 07/29/23 16:17:41.112 -Jul 29 16:17:41.127: INFO: Waiting up to 5m0s for pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a" in namespace "emptydir-4904" to be "Succeeded or Failed" -Jul 29 16:17:41.141: INFO: Pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a": Phase="Pending", Reason="", readiness=false. Elapsed: 14.388277ms -Jul 29 16:17:43.148: INFO: Pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021286096s -Jul 29 16:17:45.162: INFO: Pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.034723567s -STEP: Saw pod success 07/29/23 16:17:45.162 -Jul 29 16:17:45.162: INFO: Pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a" satisfied condition "Succeeded or Failed" -Jul 29 16:17:45.176: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-44448c5f-32a0-4b03-9116-57c9aa98192a container test-container: -STEP: delete the pod 07/29/23 16:17:45.192 -Jul 29 16:17:45.219: INFO: Waiting for pod pod-44448c5f-32a0-4b03-9116-57c9aa98192a to disappear -Jul 29 16:17:45.227: INFO: Pod pod-44448c5f-32a0-4b03-9116-57c9aa98192a no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[It] should mount an API token into pods [Conformance] + test/e2e/auth/service_accounts.go:78 +Aug 24 12:21:53.088: INFO: Waiting up to 5m0s for pod "pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580" in namespace "svcaccounts-8346" to be "running" +Aug 24 12:21:53.093: INFO: Pod "pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580": Phase="Pending", Reason="", readiness=false. Elapsed: 4.567644ms +Aug 24 12:21:55.099: INFO: Pod "pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.010804436s +Aug 24 12:21:55.099: INFO: Pod "pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580" satisfied condition "running" +STEP: reading a file in the container 08/24/23 12:21:55.099 +Aug 24 12:21:55.100: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-8346 pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token' +STEP: reading a file in the container 08/24/23 12:21:55.411 +Aug 24 12:21:55.412: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-8346 pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt' +STEP: reading a file in the container 08/24/23 12:21:55.673 +Aug 24 12:21:55.673: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-8346 pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace' +Aug 24 12:21:55.920: INFO: Got root ca configmap in namespace "svcaccounts-8346" +[AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 -Jul 29 16:17:45.227: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +Aug 24 12:21:55.924: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-4904" for this suite. 07/29/23 16:17:45.237 +STEP: Destroying namespace "svcaccounts-8346" for this suite. 
08/24/23 12:21:55.933 ------------------------------ -• [4.182 seconds] -[sig-storage] EmptyDir volumes -test/e2e/common/storage/framework.go:23 - should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:127 +• [2.928 seconds] +[sig-auth] ServiceAccounts +test/e2e/auth/framework.go:23 + should mount an API token into pods [Conformance] + test/e2e/auth/service_accounts.go:78 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:17:41.066 - Jul 29 16:17:41.066: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 16:17:41.068 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:41.101 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:41.106 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 12:21:53.017 + Aug 24 12:21:53.018: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename svcaccounts 08/24/23 12:21:53.02 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:53.053 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:53.058 + [BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 - [It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:127 - STEP: Creating a pod to test emptydir 0644 on tmpfs 07/29/23 16:17:41.112 - Jul 29 16:17:41.127: INFO: Waiting up to 5m0s for pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a" in namespace "emptydir-4904" to be "Succeeded or Failed" - Jul 29 16:17:41.141: INFO: Pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a": Phase="Pending", Reason="", readiness=false. Elapsed: 14.388277ms - Jul 29 16:17:43.148: INFO: Pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021286096s - Jul 29 16:17:45.162: INFO: Pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.034723567s - STEP: Saw pod success 07/29/23 16:17:45.162 - Jul 29 16:17:45.162: INFO: Pod "pod-44448c5f-32a0-4b03-9116-57c9aa98192a" satisfied condition "Succeeded or Failed" - Jul 29 16:17:45.176: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-44448c5f-32a0-4b03-9116-57c9aa98192a container test-container: - STEP: delete the pod 07/29/23 16:17:45.192 - Jul 29 16:17:45.219: INFO: Waiting for pod pod-44448c5f-32a0-4b03-9116-57c9aa98192a to disappear - Jul 29 16:17:45.227: INFO: Pod pod-44448c5f-32a0-4b03-9116-57c9aa98192a no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [It] should mount an API token into pods [Conformance] + test/e2e/auth/service_accounts.go:78 + Aug 24 12:21:53.088: INFO: Waiting up to 5m0s for pod "pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580" in namespace "svcaccounts-8346" to be "running" + Aug 24 12:21:53.093: INFO: Pod "pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580": Phase="Pending", Reason="", readiness=false. Elapsed: 4.567644ms + Aug 24 12:21:55.099: INFO: Pod "pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.010804436s + Aug 24 12:21:55.099: INFO: Pod "pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580" satisfied condition "running" + STEP: reading a file in the container 08/24/23 12:21:55.099 + Aug 24 12:21:55.100: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-8346 pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token' + STEP: reading a file in the container 08/24/23 12:21:55.411 + Aug 24 12:21:55.412: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-8346 pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt' + STEP: reading a file in the container 08/24/23 12:21:55.673 + Aug 24 12:21:55.673: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-8346 pod-service-account-4a0590c0-b2e1-4b74-a69d-4b853a9a7580 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace' + Aug 24 12:21:55.920: INFO: Got root ca configmap in namespace "svcaccounts-8346" + [AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 - Jul 29 16:17:45.227: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 12:21:55.924: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-4904" for this suite. 07/29/23 16:17:45.237 + STEP: Destroying namespace "svcaccounts-8346" for this suite. 
08/24/23 12:21:55.933 << End Captured GinkgoWriter Output ------------------------------ SSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should mutate custom resource with different stored version [Conformance] - test/e2e/apimachinery/webhook.go:323 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart exec hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:134 +[BeforeEach] [sig-node] Container Lifecycle Hook set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:17:45.248 -Jul 29 16:17:45.249: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 16:17:45.252 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:45.283 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:45.289 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:21:55.946 +Aug 24 12:21:55.946: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-lifecycle-hook 08/24/23 12:21:55.948 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:55.978 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:55.983 +[BeforeEach] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 16:17:45.327 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:17:46.775 -STEP: Deploying the webhook pod 07/29/23 16:17:46.797 -STEP: Wait for the deployment to be ready 07/29/23 16:17:46.818 -Jul 29 16:17:46.828: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created -STEP: Deploying the webhook service 07/29/23 16:17:48.855 -STEP: Verifying the service has paired with the endpoint 07/29/23 16:17:48.873 -Jul 29 16:17:49.874: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should mutate custom resource with different stored version [Conformance] - test/e2e/apimachinery/webhook.go:323 -Jul 29 16:17:49.881: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Registering the mutating webhook for custom resource e2e-test-webhook-2688-crds.webhook.example.com via the AdmissionRegistration API 07/29/23 16:17:50.408 -STEP: Creating a custom resource while v1 is storage version 07/29/23 16:17:50.448 -STEP: Patching Custom Resource Definition to set v2 as storage 07/29/23 16:17:52.802 -STEP: Patching the custom resource while v2 is storage version 07/29/23 16:17:52.831 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:77 +STEP: create the container to handle the HTTPGet hook request. 08/24/23 12:21:55.998 +Aug 24 12:21:56.016: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-1487" to be "running and ready" +Aug 24 12:21:56.022: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.380547ms +Aug 24 12:21:56.022: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:21:58.030: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013856465s +Aug 24 12:21:58.030: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:22:00.032: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 4.015825286s +Aug 24 12:22:00.032: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) +Aug 24 12:22:00.032: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" +[It] should execute poststart exec hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:134 +STEP: create the pod with lifecycle hook 08/24/23 12:22:00.038 +Aug 24 12:22:00.049: INFO: Waiting up to 5m0s for pod "pod-with-poststart-exec-hook" in namespace "container-lifecycle-hook-1487" to be "running and ready" +Aug 24 12:22:00.059: INFO: Pod "pod-with-poststart-exec-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 10.163026ms +Aug 24 12:22:00.059: INFO: The phase of Pod pod-with-poststart-exec-hook is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:22:02.072: INFO: Pod "pod-with-poststart-exec-hook": Phase="Running", Reason="", readiness=true. Elapsed: 2.023383779s +Aug 24 12:22:02.072: INFO: The phase of Pod pod-with-poststart-exec-hook is Running (Ready = true) +Aug 24 12:22:02.073: INFO: Pod "pod-with-poststart-exec-hook" satisfied condition "running and ready" +STEP: check poststart hook 08/24/23 12:22:02.08 +STEP: delete the pod with lifecycle hook 08/24/23 12:22:02.13 +Aug 24 12:22:02.160: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Aug 24 12:22:02.171: INFO: Pod pod-with-poststart-exec-hook still exists +Aug 24 12:22:04.172: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Aug 24 12:22:04.180: INFO: Pod pod-with-poststart-exec-hook still exists +Aug 24 12:22:06.171: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Aug 24 12:22:06.179: INFO: Pod pod-with-poststart-exec-hook no longer exists +[AfterEach] [sig-node] Container Lifecycle Hook test/e2e/framework/node/init/init.go:32 -Jul 29 16:17:53.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 12:22:06.179: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-1048" for this suite. 07/29/23 16:17:53.876 -STEP: Destroying namespace "webhook-1048-markers" for this suite. 07/29/23 16:17:53.891 +STEP: Destroying namespace "container-lifecycle-hook-1487" for this suite. 
08/24/23 12:22:06.189 ------------------------------ -• [SLOW TEST] [8.657 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should mutate custom resource with different stored version [Conformance] - test/e2e/apimachinery/webhook.go:323 +• [SLOW TEST] [10.253 seconds] +[sig-node] Container Lifecycle Hook +test/e2e/common/node/framework.go:23 + when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:46 + should execute poststart exec hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:134 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Container Lifecycle Hook set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:17:45.248 - Jul 29 16:17:45.249: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 16:17:45.252 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:45.283 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:45.289 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:21:55.946 + Aug 24 12:21:55.946: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-lifecycle-hook 08/24/23 12:21:55.948 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:21:55.978 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:21:55.983 + [BeforeEach] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 16:17:45.327 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:17:46.775 - STEP: Deploying the webhook pod 07/29/23 16:17:46.797 - STEP: Wait for the deployment to be ready 07/29/23 16:17:46.818 - Jul 29 16:17:46.828: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created - STEP: Deploying the webhook service 07/29/23 16:17:48.855 - STEP: Verifying the service has paired with the endpoint 07/29/23 16:17:48.873 - Jul 29 16:17:49.874: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should mutate custom resource with different stored version [Conformance] - test/e2e/apimachinery/webhook.go:323 - Jul 29 16:17:49.881: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Registering the mutating webhook for custom resource e2e-test-webhook-2688-crds.webhook.example.com via the AdmissionRegistration API 07/29/23 16:17:50.408 - STEP: Creating a custom resource while v1 is storage version 07/29/23 16:17:50.448 - STEP: Patching Custom Resource Definition to set v2 as storage 07/29/23 16:17:52.802 - STEP: Patching the custom resource while v2 is storage version 07/29/23 16:17:52.831 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:77 + STEP: create the container to handle the HTTPGet hook request. 
08/24/23 12:21:55.998 + Aug 24 12:21:56.016: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-1487" to be "running and ready" + Aug 24 12:21:56.022: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 5.380547ms + Aug 24 12:21:56.022: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:21:58.030: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013856465s + Aug 24 12:21:58.030: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:22:00.032: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 4.015825286s + Aug 24 12:22:00.032: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) + Aug 24 12:22:00.032: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" + [It] should execute poststart exec hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:134 + STEP: create the pod with lifecycle hook 08/24/23 12:22:00.038 + Aug 24 12:22:00.049: INFO: Waiting up to 5m0s for pod "pod-with-poststart-exec-hook" in namespace "container-lifecycle-hook-1487" to be "running and ready" + Aug 24 12:22:00.059: INFO: Pod "pod-with-poststart-exec-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 10.163026ms + Aug 24 12:22:00.059: INFO: The phase of Pod pod-with-poststart-exec-hook is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:22:02.072: INFO: Pod "pod-with-poststart-exec-hook": Phase="Running", Reason="", readiness=true. Elapsed: 2.023383779s + Aug 24 12:22:02.072: INFO: The phase of Pod pod-with-poststart-exec-hook is Running (Ready = true) + Aug 24 12:22:02.073: INFO: Pod "pod-with-poststart-exec-hook" satisfied condition "running and ready" + STEP: check poststart hook 08/24/23 12:22:02.08 + STEP: delete the pod with lifecycle hook 08/24/23 12:22:02.13 + Aug 24 12:22:02.160: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear + Aug 24 12:22:02.171: INFO: Pod pod-with-poststart-exec-hook still exists + Aug 24 12:22:04.172: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear + Aug 24 12:22:04.180: INFO: Pod pod-with-poststart-exec-hook still exists + Aug 24 12:22:06.171: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear + Aug 24 12:22:06.179: INFO: Pod pod-with-poststart-exec-hook no longer exists + [AfterEach] [sig-node] Container Lifecycle Hook test/e2e/framework/node/init/init.go:32 - Jul 29 16:17:53.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 12:22:06.179: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-1048" for this 
suite. 07/29/23 16:17:53.876 - STEP: Destroying namespace "webhook-1048-markers" for this suite. 07/29/23 16:17:53.891 + STEP: Destroying namespace "container-lifecycle-hook-1487" for this suite. 08/24/23 12:22:06.189 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SS ------------------------------ -[sig-network] DNS - should support configurable pod DNS nameservers [Conformance] - test/e2e/network/dns.go:411 -[BeforeEach] [sig-network] DNS +[sig-storage] Projected configMap + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:375 +[BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:17:53.916 -Jul 29 16:17:53.917: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename dns 07/29/23 16:17:53.92 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:53.972 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:53.976 -[BeforeEach] [sig-network] DNS +STEP: Creating a kubernetes client 08/24/23 12:22:06.204 +Aug 24 12:22:06.204: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:22:06.206 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:06.232 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:06.236 +[BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 -[It] should support configurable pod DNS nameservers [Conformance] - test/e2e/network/dns.go:411 -STEP: Creating a pod with dnsPolicy=None and customized dnsConfig... 
07/29/23 16:17:53.98 -Jul 29 16:17:54.003: INFO: Created pod &Pod{ObjectMeta:{test-dns-nameservers dns-8710 a055032b-2693-44b4-872f-da55708aad4a 21488 0 2023-07-29 16:17:53 +0000 UTC map[] map[] [] [] [{e2e.test Update v1 2023-07-29 16:17:53 +0000 UTC FieldsV1 {"f:spec":{"f:containers":{"k:{\"name\":\"agnhost-container\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsConfig":{".":{},"f:nameservers":{},"f:searches":{}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-s2n8q,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost-container,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[pause],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s2n8q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:None,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountSe
rviceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:&PodDNSConfig{Nameservers:[1.1.1.1],Searches:[resolv.conf.local],Options:[]PodDNSConfigOption{},},ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 16:17:54.004: INFO: Waiting up to 5m0s for pod "test-dns-nameservers" in namespace "dns-8710" to be "running and ready" -Jul 29 16:17:54.011: INFO: Pod "test-dns-nameservers": Phase="Pending", Reason="", readiness=false. Elapsed: 7.011791ms -Jul 29 16:17:54.011: INFO: The phase of Pod test-dns-nameservers is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:17:56.019: INFO: Pod "test-dns-nameservers": Phase="Running", Reason="", readiness=true. Elapsed: 2.015268229s -Jul 29 16:17:56.019: INFO: The phase of Pod test-dns-nameservers is Running (Ready = true) -Jul 29 16:17:56.019: INFO: Pod "test-dns-nameservers" satisfied condition "running and ready" -STEP: Verifying customized DNS suffix list is configured on pod... 07/29/23 16:17:56.019 -Jul 29 16:17:56.020: INFO: ExecWithOptions {Command:[/agnhost dns-suffix] Namespace:dns-8710 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:17:56.020: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:17:56.022: INFO: ExecWithOptions: Clientset creation -Jul 29 16:17:56.022: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/dns-8710/pods/test-dns-nameservers/exec?command=%2Fagnhost&command=dns-suffix&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) -STEP: Verifying customized DNS server is configured on pod... 07/29/23 16:17:56.156 -Jul 29 16:17:56.157: INFO: ExecWithOptions {Command:[/agnhost dns-server-list] Namespace:dns-8710 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:17:56.157: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:17:56.159: INFO: ExecWithOptions: Clientset creation -Jul 29 16:17:56.159: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/dns-8710/pods/test-dns-nameservers/exec?command=%2Fagnhost&command=dns-server-list&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) -Jul 29 16:17:56.298: INFO: Deleting pod test-dns-nameservers... 
-[AfterEach] [sig-network] DNS +[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:375 +STEP: Creating configMap with name projected-configmap-test-volume-68dc8eef-7bae-4719-8bf8-ea9ca3407c66 08/24/23 12:22:06.243 +STEP: Creating a pod to test consume configMaps 08/24/23 12:22:06.253 +Aug 24 12:22:06.269: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6" in namespace "projected-7558" to be "Succeeded or Failed" +Aug 24 12:22:06.274: INFO: Pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6": Phase="Pending", Reason="", readiness=false. Elapsed: 5.764878ms +Aug 24 12:22:08.286: INFO: Pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017009128s +Aug 24 12:22:10.284: INFO: Pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015563587s +STEP: Saw pod success 08/24/23 12:22:10.284 +Aug 24 12:22:10.285: INFO: Pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6" satisfied condition "Succeeded or Failed" +Aug 24 12:22:10.290: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6 container projected-configmap-volume-test: +STEP: delete the pod 08/24/23 12:22:10.301 +Aug 24 12:22:10.323: INFO: Waiting for pod pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6 to disappear +Aug 24 12:22:10.330: INFO: Pod pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6 no longer exists +[AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:17:56.337: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] DNS +Aug 24 12:22:10.330: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 -STEP: Destroying namespace "dns-8710" for this suite. 07/29/23 16:17:56.348 +STEP: Destroying namespace "projected-7558" for this suite. 
08/24/23 12:22:10.34 ------------------------------ -• [2.455 seconds] -[sig-network] DNS -test/e2e/network/common/framework.go:23 - should support configurable pod DNS nameservers [Conformance] - test/e2e/network/dns.go:411 +• [4.147 seconds] +[sig-storage] Projected configMap +test/e2e/common/storage/framework.go:23 + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:375 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] DNS + [BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:17:53.916 - Jul 29 16:17:53.917: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename dns 07/29/23 16:17:53.92 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:53.972 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:53.976 - [BeforeEach] [sig-network] DNS + STEP: Creating a kubernetes client 08/24/23 12:22:06.204 + Aug 24 12:22:06.204: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:22:06.206 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:06.232 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:06.236 + [BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 - [It] should support configurable pod DNS nameservers [Conformance] - test/e2e/network/dns.go:411 - STEP: Creating a pod with dnsPolicy=None and customized dnsConfig... 07/29/23 16:17:53.98 - Jul 29 16:17:54.003: INFO: Created pod &Pod{ObjectMeta:{test-dns-nameservers dns-8710 a055032b-2693-44b4-872f-da55708aad4a 21488 0 2023-07-29 16:17:53 +0000 UTC map[] map[] [] [] [{e2e.test Update v1 2023-07-29 16:17:53 +0000 UTC FieldsV1 {"f:spec":{"f:containers":{"k:{\"name\":\"agnhost-container\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsConfig":{".":{},"f:nameservers":{},"f:searches":{}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-s2n8q,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost-container,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[pause],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s2n8q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:None,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:&PodDNSConfig{Nameservers:[1.1.1.1],Searches:[resolv.conf.local],Options:[]PodDNSConfigOption{},},ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
- Jul 29 16:17:54.004: INFO: Waiting up to 5m0s for pod "test-dns-nameservers" in namespace "dns-8710" to be "running and ready"
- Jul 29 16:17:54.011: INFO: Pod "test-dns-nameservers": Phase="Pending", Reason="", readiness=false. Elapsed: 7.011791ms
- Jul 29 16:17:54.011: INFO: The phase of Pod test-dns-nameservers is Pending, waiting for it to be Running (with Ready = true)
- Jul 29 16:17:56.019: INFO: Pod "test-dns-nameservers": Phase="Running", Reason="", readiness=true. Elapsed: 2.015268229s
- Jul 29 16:17:56.019: INFO: The phase of Pod test-dns-nameservers is Running (Ready = true)
- Jul 29 16:17:56.019: INFO: Pod "test-dns-nameservers" satisfied condition "running and ready"
- STEP: Verifying customized DNS suffix list is configured on pod... 07/29/23 16:17:56.019
- Jul 29 16:17:56.020: INFO: ExecWithOptions {Command:[/agnhost dns-suffix] Namespace:dns-8710 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false}
- Jul 29 16:17:56.020: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- Jul 29 16:17:56.022: INFO: ExecWithOptions: Clientset creation
- Jul 29 16:17:56.022: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/dns-8710/pods/test-dns-nameservers/exec?command=%2Fagnhost&command=dns-suffix&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true)
- STEP: Verifying customized DNS server is configured on pod... 07/29/23 16:17:56.156
- Jul 29 16:17:56.157: INFO: ExecWithOptions {Command:[/agnhost dns-server-list] Namespace:dns-8710 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false}
- Jul 29 16:17:56.157: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- Jul 29 16:17:56.159: INFO: ExecWithOptions: Clientset creation
- Jul 29 16:17:56.159: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/dns-8710/pods/test-dns-nameservers/exec?command=%2Fagnhost&command=dns-server-list&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true)
- Jul 29 16:17:56.298: INFO: Deleting pod test-dns-nameservers...
- [AfterEach] [sig-network] DNS
+ [It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_configmap.go:375
+ STEP: Creating configMap with name projected-configmap-test-volume-68dc8eef-7bae-4719-8bf8-ea9ca3407c66 08/24/23 12:22:06.243
+ STEP: Creating a pod to test consume configMaps 08/24/23 12:22:06.253
+ Aug 24 12:22:06.269: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6" in namespace "projected-7558" to be "Succeeded or Failed"
+ Aug 24 12:22:06.274: INFO: Pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6": Phase="Pending", Reason="", readiness=false. Elapsed: 5.764878ms
+ Aug 24 12:22:08.286: INFO: Pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017009128s
+ Aug 24 12:22:10.284: INFO: Pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015563587s
+ STEP: Saw pod success 08/24/23 12:22:10.284
+ Aug 24 12:22:10.285: INFO: Pod "pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6" satisfied condition "Succeeded or Failed"
+ Aug 24 12:22:10.290: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6 container projected-configmap-volume-test:
+ STEP: delete the pod 08/24/23 12:22:10.301
+ Aug 24 12:22:10.323: INFO: Waiting for pod pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6 to disappear
+ Aug 24 12:22:10.330: INFO: Pod pod-projected-configmaps-ccc68296-3a9a-4cbd-b752-f289203939d6 no longer exists
+ [AfterEach] [sig-storage] Projected configMap
  test/e2e/framework/node/init/init.go:32
- Jul 29 16:17:56.337: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-network] DNS
+ Aug 24 12:22:10.330: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] Projected configMap
  test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-network] DNS
+ [DeferCleanup (Each)] [sig-storage] Projected configMap
  dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-network] DNS
+ [DeferCleanup (Each)] [sig-storage] Projected configMap
  tear down framework | framework.go:193
- STEP: Destroying namespace "dns-8710" for this suite. 07/29/23 16:17:56.348
+ STEP: Destroying namespace "projected-7558" for this suite. 08/24/23 12:22:10.34
<< End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+SSS
------------------------------
-[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic]
- should validate Statefulset Status endpoints [Conformance]
- test/e2e/apps/statefulset.go:977
-[BeforeEach] [sig-apps] StatefulSet
+[sig-storage] EmptyDir volumes
+ should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/empty_dir.go:177
+[BeforeEach] [sig-storage] EmptyDir volumes
set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:17:56.373
-Jul 29 16:17:56.373: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename statefulset 07/29/23 16:17:56.375
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:56.41
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:56.416
-[BeforeEach] [sig-apps] StatefulSet
+STEP: Creating a kubernetes client 08/24/23 12:22:10.353
+Aug 24 12:22:10.353: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename emptydir 08/24/23 12:22:10.356
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:10.386
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:10.392
+[BeforeEach] [sig-storage] EmptyDir volumes
test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-apps] StatefulSet
- test/e2e/apps/statefulset.go:98
-[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic]
- test/e2e/apps/statefulset.go:113
-STEP: Creating service test in namespace statefulset-6007 07/29/23 16:17:56.421
-[It] should validate Statefulset Status endpoints [Conformance]
- test/e2e/apps/statefulset.go:977
-STEP: Creating statefulset ss in namespace statefulset-6007 07/29/23 16:17:56.437
-Jul 29 16:17:56.455: INFO: Found 0 stateful pods, waiting for 1
-Jul 29 16:18:06.465: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
-STEP: Patch Statefulset to include a label 07/29/23 16:18:06.475
-STEP: Getting /status 07/29/23 16:18:06.488
-Jul 29 16:18:06.499: INFO: StatefulSet ss has Conditions: []v1.StatefulSetCondition(nil)
-STEP: updating the StatefulSet Status 07/29/23 16:18:06.499
-Jul 29 16:18:06.519: INFO: updatedStatus.Conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}}
-STEP: watching for the statefulset status to be updated 07/29/23 16:18:06.52
-Jul 29 16:18:06.523: INFO: Observed &StatefulSet event: ADDED
-Jul 29 16:18:06.523: INFO: Found Statefulset ss in namespace statefulset-6007 with labels: map[e2e:testing] annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}
-Jul 29 16:18:06.524: INFO: Statefulset ss has an updated status
-STEP: patching the Statefulset Status 07/29/23 16:18:06.524
-Jul 29 16:18:06.524: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}
-Jul 29 16:18:06.539: INFO: Patched status conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}}
-STEP: watching for the Statefulset status to be patched 07/29/23 16:18:06.54
-Jul 29 16:18:06.543: INFO: Observed &StatefulSet event: ADDED
-[AfterEach] Basic StatefulSet functionality [StatefulSetBasic]
- test/e2e/apps/statefulset.go:124
-Jul 29 16:18:06.543: INFO: Deleting all statefulset in ns statefulset-6007
-Jul 29 16:18:06.549: INFO: Scaling statefulset ss to 0
-Jul 29 16:18:16.601: INFO: Waiting for statefulset status.replicas updated to 0
-Jul 29 16:18:16.612: INFO: Deleting statefulset ss
-[AfterEach] [sig-apps] StatefulSet
+[It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/empty_dir.go:177
+STEP: Creating a pod to test emptydir 0666 on node default medium 08/24/23 12:22:10.397
+Aug 24 12:22:10.413: INFO: Waiting up to 5m0s for pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc" in namespace "emptydir-6731" to be "Succeeded or Failed"
+Aug 24 12:22:10.423: INFO: Pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc": Phase="Pending", Reason="", readiness=false. Elapsed: 10.352029ms
+Aug 24 12:22:12.432: INFO: Pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019247345s
+Aug 24 12:22:14.432: INFO: Pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018707008s
+STEP: Saw pod success 08/24/23 12:22:14.432
+Aug 24 12:22:14.432: INFO: Pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc" satisfied condition "Succeeded or Failed"
+Aug 24 12:22:14.438: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc container test-container:
+STEP: delete the pod 08/24/23 12:22:14.45
+Aug 24 12:22:14.470: INFO: Waiting for pod pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc to disappear
+Aug 24 12:22:14.476: INFO: Pod pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
test/e2e/framework/node/init/init.go:32
-Jul 29 16:18:16.638: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-apps] StatefulSet
+Aug 24 12:22:14.477: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-storage] EmptyDir volumes
test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-apps] StatefulSet
+[DeferCleanup (Each)] [sig-storage] EmptyDir volumes
dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-apps] StatefulSet
+[DeferCleanup (Each)] [sig-storage] EmptyDir volumes
tear down framework | framework.go:193
-STEP: Destroying namespace "statefulset-6007" for this suite. 07/29/23 16:18:16.647
+STEP: Destroying namespace "emptydir-6731" for this suite. 08/24/23 12:22:14.487
------------------------------
-• [SLOW TEST] [20.285 seconds]
-[sig-apps] StatefulSet
-test/e2e/apps/framework.go:23
- Basic StatefulSet functionality [StatefulSetBasic]
- test/e2e/apps/statefulset.go:103
- should validate Statefulset Status endpoints [Conformance]
- test/e2e/apps/statefulset.go:977
+• [4.152 seconds]
+[sig-storage] EmptyDir volumes
+test/e2e/common/storage/framework.go:23
+ should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/empty_dir.go:177
Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-apps] StatefulSet
+ [BeforeEach] [sig-storage] EmptyDir volumes
  set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:17:56.373
- Jul 29 16:17:56.373: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename statefulset 07/29/23 16:17:56.375
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:17:56.41
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:17:56.416
- [BeforeEach] [sig-apps] StatefulSet
+ STEP: Creating a kubernetes client 08/24/23 12:22:10.353
+ Aug 24 12:22:10.353: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename emptydir 08/24/23 12:22:10.356
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:10.386
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:10.392
+ [BeforeEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-apps] StatefulSet
- test/e2e/apps/statefulset.go:98
- [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic]
- test/e2e/apps/statefulset.go:113
- STEP: Creating service test in namespace statefulset-6007 07/29/23 16:17:56.421
- [It] should validate Statefulset Status endpoints [Conformance]
- test/e2e/apps/statefulset.go:977
- STEP: Creating statefulset ss in namespace statefulset-6007 07/29/23 16:17:56.437
- Jul 29 16:17:56.455: INFO: Found 0 stateful pods, waiting for 1
- Jul 29 16:18:06.465: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
- STEP: Patch Statefulset to include a label 07/29/23 16:18:06.475
- STEP: Getting /status 07/29/23 16:18:06.488
- Jul 29 16:18:06.499: INFO: StatefulSet ss has Conditions: []v1.StatefulSetCondition(nil)
- STEP: updating the StatefulSet Status 07/29/23 16:18:06.499
- Jul 29 16:18:06.519: INFO: updatedStatus.Conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}}
- STEP: watching for the statefulset status to be updated 07/29/23 16:18:06.52
- Jul 29 16:18:06.523: INFO: Observed &StatefulSet event: ADDED
- Jul 29 16:18:06.523: INFO: Found Statefulset ss in namespace statefulset-6007 with labels: map[e2e:testing] annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}
- Jul 29 16:18:06.524: INFO: Statefulset ss has an updated status
- STEP: patching the Statefulset Status 07/29/23 16:18:06.524
- Jul 29 16:18:06.524: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}
- Jul 29 16:18:06.539: INFO: Patched status conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}}
- STEP: watching for the Statefulset status to be patched 07/29/23 16:18:06.54
- Jul 29 16:18:06.543: INFO: Observed &StatefulSet event: ADDED
- [AfterEach] Basic StatefulSet functionality [StatefulSetBasic]
- test/e2e/apps/statefulset.go:124
- Jul 29 16:18:06.543: INFO: Deleting all statefulset in ns statefulset-6007
- Jul 29 16:18:06.549: INFO: Scaling statefulset ss to 0
- Jul 29 16:18:16.601: INFO: Waiting for statefulset status.replicas updated to 0
- Jul 29 16:18:16.612: INFO: Deleting statefulset ss
- [AfterEach] [sig-apps] StatefulSet
+ [It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/empty_dir.go:177
+ STEP: Creating a pod to test emptydir 0666 on node default medium 08/24/23 12:22:10.397
+ Aug 24 12:22:10.413: INFO: Waiting up to 5m0s for pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc" in namespace "emptydir-6731" to be "Succeeded or Failed"
+ Aug 24 12:22:10.423: INFO: Pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc": Phase="Pending", Reason="", readiness=false. Elapsed: 10.352029ms
+ Aug 24 12:22:12.432: INFO: Pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019247345s
+ Aug 24 12:22:14.432: INFO: Pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018707008s
+ STEP: Saw pod success 08/24/23 12:22:14.432
+ Aug 24 12:22:14.432: INFO: Pod "pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc" satisfied condition "Succeeded or Failed"
+ Aug 24 12:22:14.438: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc container test-container:
+ STEP: delete the pod 08/24/23 12:22:14.45
+ Aug 24 12:22:14.470: INFO: Waiting for pod pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc to disappear
+ Aug 24 12:22:14.476: INFO: Pod pod-18835aa8-59cf-4e74-8d1c-33e5ff0a5abc no longer exists
+ [AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/node/init/init.go:32
- Jul 29 16:18:16.638: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-apps] StatefulSet
+ Aug 24 12:22:14.477: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] EmptyDir volumes
  test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-apps] StatefulSet
+ [DeferCleanup (Each)] [sig-storage] EmptyDir volumes
  dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-apps] StatefulSet
+ [DeferCleanup (Each)] [sig-storage] EmptyDir volumes
  tear down framework | framework.go:193
- STEP: Destroying namespace "statefulset-6007" for this suite. 07/29/23 16:18:16.647
+ STEP: Destroying namespace "emptydir-6731" for this suite. 08/24/23 12:22:14.487
<< End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+SSSSSSSSSS
------------------------------
-[sig-api-machinery] Namespaces [Serial]
- should patch a Namespace [Conformance]
- test/e2e/apimachinery/namespace.go:268
-[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+[sig-node] Variable Expansion
+ should fail substituting values in a volume subpath with backticks [Slow] [Conformance]
+ test/e2e/common/node/expansion.go:152
+[BeforeEach] [sig-node] Variable Expansion
set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:18:16.67
-Jul 29 16:18:16.671: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename namespaces 07/29/23 16:18:16.674
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:16.712
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:16.718
-[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+STEP: Creating a kubernetes client 08/24/23 12:22:14.506
+Aug 24 12:22:14.506: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename var-expansion 08/24/23 12:22:14.508
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:14.542
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:14.55
+[BeforeEach] [sig-node] Variable Expansion
test/e2e/framework/metrics/init/init.go:31
-[It] should patch a Namespace [Conformance]
- test/e2e/apimachinery/namespace.go:268
-STEP: creating a Namespace 07/29/23 16:18:16.723
-STEP: patching the Namespace 07/29/23 16:18:16.75
-STEP: get the Namespace and ensuring it has the label 07/29/23 16:18:16.758
-[AfterEach] [sig-api-machinery] Namespaces [Serial]
+[It] should fail substituting values in a volume subpath with backticks [Slow] [Conformance]
+ test/e2e/common/node/expansion.go:152
+Aug 24 12:22:14.577: INFO: Waiting up to 2m0s for pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a" in namespace "var-expansion-5845" to be "container 0 failed with reason CreateContainerConfigError"
+Aug 24 12:22:14.588: INFO: Pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a": Phase="Pending", Reason="", readiness=false. Elapsed: 10.295757ms
+Aug 24 12:22:16.595: INFO: Pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017208299s
+Aug 24 12:22:16.595: INFO: Pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a" satisfied condition "container 0 failed with reason CreateContainerConfigError"
+Aug 24 12:22:16.595: INFO: Deleting pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a" in namespace "var-expansion-5845"
+Aug 24 12:22:16.609: INFO: Wait up to 5m0s for pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a" to be fully deleted
+[AfterEach] [sig-node] Variable Expansion
test/e2e/framework/node/init/init.go:32
-Jul 29 16:18:16.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+Aug 24 12:22:18.626: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-node] Variable Expansion
test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+[DeferCleanup (Each)] [sig-node] Variable Expansion
dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+[DeferCleanup (Each)] [sig-node] Variable Expansion
tear down framework | framework.go:193
-STEP: Destroying namespace "namespaces-5806" for this suite. 07/29/23 16:18:16.774
-STEP: Destroying namespace "nspatchtest-212a7ba7-0495-4b7b-97c5-44de8aa294d1-5324" for this suite. 07/29/23 16:18:16.784
+STEP: Destroying namespace "var-expansion-5845" for this suite. 08/24/23 12:22:18.635
------------------------------
-• [0.123 seconds]
-[sig-api-machinery] Namespaces [Serial]
-test/e2e/apimachinery/framework.go:23
- should patch a Namespace [Conformance]
- test/e2e/apimachinery/namespace.go:268
+• [4.139 seconds]
+[sig-node] Variable Expansion
+test/e2e/common/node/framework.go:23
+ should fail substituting values in a volume subpath with backticks [Slow] [Conformance]
+ test/e2e/common/node/expansion.go:152
Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-api-machinery] Namespaces [Serial]
+ [BeforeEach] [sig-node] Variable Expansion
  set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:18:16.67
- Jul 29 16:18:16.671: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename namespaces 07/29/23 16:18:16.674
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:16.712
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:16.718
- [BeforeEach] [sig-api-machinery] Namespaces [Serial]
+ STEP: Creating a kubernetes client 08/24/23 12:22:14.506
+ Aug 24 12:22:14.506: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename var-expansion 08/24/23 12:22:14.508
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:14.542
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:14.55
+ [BeforeEach] [sig-node] Variable Expansion
  test/e2e/framework/metrics/init/init.go:31
- [It] should patch a Namespace [Conformance]
- test/e2e/apimachinery/namespace.go:268
- STEP: creating a Namespace 07/29/23 16:18:16.723
- STEP: patching the Namespace 07/29/23 16:18:16.75
- STEP: get the Namespace and ensuring it has the label 07/29/23 16:18:16.758
- [AfterEach] [sig-api-machinery] Namespaces [Serial]
+ [It] should fail substituting values in a volume subpath with backticks [Slow] [Conformance]
+ test/e2e/common/node/expansion.go:152
+ Aug 24 12:22:14.577: INFO: Waiting up to 2m0s for pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a" in namespace "var-expansion-5845" to be "container 0 failed with reason CreateContainerConfigError"
+ Aug 24 12:22:14.588: INFO: Pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a": Phase="Pending", Reason="", readiness=false. Elapsed: 10.295757ms
+ Aug 24 12:22:16.595: INFO: Pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017208299s
+ Aug 24 12:22:16.595: INFO: Pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a" satisfied condition "container 0 failed with reason CreateContainerConfigError"
+ Aug 24 12:22:16.595: INFO: Deleting pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a" in namespace "var-expansion-5845"
+ Aug 24 12:22:16.609: INFO: Wait up to 5m0s for pod "var-expansion-d6160d00-d301-4b56-aea0-429c1dfce20a" to be fully deleted
+ [AfterEach] [sig-node] Variable Expansion
  test/e2e/framework/node/init/init.go:32
- Jul 29 16:18:16.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+ Aug 24 12:22:18.626: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-node] Variable Expansion
  test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+ [DeferCleanup (Each)] [sig-node] Variable Expansion
  dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
+ [DeferCleanup (Each)] [sig-node] Variable Expansion
  tear down framework | framework.go:193
- STEP: Destroying namespace "namespaces-5806" for this suite. 07/29/23 16:18:16.774
- STEP: Destroying namespace "nspatchtest-212a7ba7-0495-4b7b-97c5-44de8aa294d1-5324" for this suite. 07/29/23 16:18:16.784
+ STEP: Destroying namespace "var-expansion-5845" for this suite. 08/24/23 12:22:18.635
<< End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSS
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- listing mutating webhooks should work [Conformance]
- test/e2e/apimachinery/webhook.go:656
+ should unconditionally reject operations on fail closed webhook [Conformance]
+ test/e2e/apimachinery/webhook.go:239
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:18:16.797
-Jul 29 16:18:16.798: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename webhook 07/29/23 16:18:16.8
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:16.825
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:16.83
+STEP: Creating a kubernetes client 08/24/23 12:22:18.655
+Aug 24 12:22:18.655: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename webhook 08/24/23 12:22:18.657
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:18.689
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:18.695
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
test/e2e/framework/metrics/init/init.go:31
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
test/e2e/apimachinery/webhook.go:90
-STEP: Setting up server cert 07/29/23 16:18:16.858
-STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:18:18.981
-STEP: Deploying the webhook pod 07/29/23 16:18:18.999
-STEP: Wait for the deployment to be ready 07/29/23 16:18:19.017
-Jul 29 16:18:19.031: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created
-STEP: Deploying the webhook service 07/29/23 16:18:21.051
-STEP: Verifying the service has paired with the endpoint 07/29/23 16:18:21.074
-Jul 29 16:18:22.074: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1
-[It] listing mutating webhooks should work [Conformance]
- test/e2e/apimachinery/webhook.go:656
-STEP: Listing all of the created validation webhooks 07/29/23 16:18:22.181
-STEP: Creating a configMap that should be mutated 07/29/23 16:18:22.209
-STEP: Deleting the collection of validation webhooks 07/29/23 16:18:22.269
-STEP: Creating a configMap that should not be mutated 07/29/23 16:18:22.372
+STEP: Setting up server cert 08/24/23 12:22:18.723
+STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:22:19.901
+STEP: Deploying the webhook pod 08/24/23 12:22:19.922
+STEP: Wait for the deployment to be ready 08/24/23 12:22:19.947
+Aug 24 12:22:19.963: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set
+STEP: Deploying the webhook service 08/24/23 12:22:21.992
+STEP: Verifying the service has paired with the endpoint 08/24/23 12:22:22.011
+Aug 24 12:22:23.012: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1
+[It] should unconditionally reject operations on fail closed webhook [Conformance]
+ test/e2e/apimachinery/webhook.go:239
+STEP: Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API 08/24/23 12:22:23.019
+STEP: create a namespace for the webhook 08/24/23 12:22:23.099
+STEP: create a configmap should be unconditionally rejected by the webhook 08/24/23 12:22:23.116
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
test/e2e/framework/node/init/init.go:32
-Jul 29 16:18:22.392: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+Aug 24 12:22:23.195: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
test/e2e/apimachinery/webhook.go:105
[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
@@ -16406,44 +14724,43 @@ Jul 29 16:18:22.392: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
dump namespaces | framework.go:196
[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
tear down framework | framework.go:193
-STEP: Destroying namespace "webhook-1102" for this suite. 07/29/23 16:18:22.524
-STEP: Destroying namespace "webhook-1102-markers" for this suite. 07/29/23 16:18:22.539
+STEP: Destroying namespace "webhook-746" for this suite. 08/24/23 12:22:23.289
+STEP: Destroying namespace "webhook-746-markers" for this suite. 08/24/23 12:22:23.303
------------------------------
-• [SLOW TEST] [5.770 seconds]
+• [4.665 seconds]
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
test/e2e/apimachinery/framework.go:23
- listing mutating webhooks should work [Conformance]
- test/e2e/apimachinery/webhook.go:656
+ should unconditionally reject operations on fail closed webhook [Conformance]
+ test/e2e/apimachinery/webhook.go:239
Begin Captured GinkgoWriter Output >>
  [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:18:16.797
- Jul 29 16:18:16.798: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename webhook 07/29/23 16:18:16.8
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:16.825
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:16.83
+ STEP: Creating a kubernetes client 08/24/23 12:22:18.655
+ Aug 24 12:22:18.655: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename webhook 08/24/23 12:22:18.657
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:18.689
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:18.695
  [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/framework/metrics/init/init.go:31
  [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:90
- STEP: Setting up server cert 07/29/23 16:18:16.858
- STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:18:18.981
- STEP: Deploying the webhook pod 07/29/23 16:18:18.999
- STEP: Wait for the deployment to be ready 07/29/23 16:18:19.017
- Jul 29 16:18:19.031: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created
- STEP: Deploying the webhook service 07/29/23 16:18:21.051
- STEP: Verifying the service has paired with the endpoint 07/29/23 16:18:21.074
- Jul 29 16:18:22.074: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1
- [It] listing mutating webhooks should work [Conformance]
- test/e2e/apimachinery/webhook.go:656
- STEP: Listing all of the created validation webhooks 07/29/23 16:18:22.181
- STEP: Creating a configMap that should be mutated 07/29/23 16:18:22.209
- STEP: Deleting the collection of validation webhooks 07/29/23 16:18:22.269
- STEP: Creating a configMap that should not be mutated 07/29/23 16:18:22.372
+ STEP: Setting up server cert 08/24/23 12:22:18.723
+ STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:22:19.901
+ STEP: Deploying the webhook pod 08/24/23 12:22:19.922
+ STEP: Wait for the deployment to be ready 08/24/23 12:22:19.947
+ Aug 24 12:22:19.963: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set
+ STEP: Deploying the webhook service 08/24/23 12:22:21.992
+ STEP: Verifying the service has paired with the endpoint 08/24/23 12:22:22.011
+ Aug 24 12:22:23.012: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1
+ [It] should unconditionally reject operations on fail closed webhook [Conformance]
+ test/e2e/apimachinery/webhook.go:239
+ STEP: Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API 08/24/23 12:22:23.019
+ STEP: create a namespace for the webhook 08/24/23 12:22:23.099
+ STEP: create a configmap should be unconditionally rejected by the webhook 08/24/23 12:22:23.116
  [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/framework/node/init/init.go:32
- Jul 29 16:18:22.392: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ Aug 24 12:22:23.195: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
  [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:105
  [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
@@ -16452,3135 +14769,3732 @@ test/e2e/apimachinery/framework.go:23
  dump namespaces | framework.go:196
  [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  tear down framework | framework.go:193
- STEP: Destroying namespace "webhook-1102" for this suite. 07/29/23 16:18:22.524
- STEP: Destroying namespace "webhook-1102-markers" for this suite. 07/29/23 16:18:22.539
+ STEP: Destroying namespace "webhook-746" for this suite. 08/24/23 12:22:23.289
+ STEP: Destroying namespace "webhook-746-markers" for this suite. 08/24/23 12:22:23.303
<< End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+SSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-node] ConfigMap
- should be consumable via environment variable [NodeConformance] [Conformance]
- test/e2e/common/node/configmap.go:45
-[BeforeEach] [sig-node] ConfigMap
+[sig-node] Containers
+ should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance]
+ test/e2e/common/node/containers.go:73
+[BeforeEach] [sig-node] Containers
set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:18:22.595
-Jul 29 16:18:22.595: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename configmap 07/29/23 16:18:22.598
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:22.667
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:22.676
-[BeforeEach] [sig-node] ConfigMap
+STEP: Creating a kubernetes client 08/24/23 12:22:23.326
+Aug 24 12:22:23.326: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename containers 08/24/23 12:22:23.328
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:23.382
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:23.386
+[BeforeEach] [sig-node] Containers
test/e2e/framework/metrics/init/init.go:31
-[It] should be consumable via environment variable [NodeConformance] [Conformance]
- test/e2e/common/node/configmap.go:45
-STEP: Creating configMap configmap-3923/configmap-test-3b0e665e-918d-4a59-b8a0-26fe9553abf8 07/29/23 16:18:22.681
-STEP: Creating a pod to test consume configMaps 07/29/23 16:18:22.689
-Jul 29 16:18:22.711: INFO: Waiting up to 5m0s for pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df" in namespace "configmap-3923" to be "Succeeded or Failed"
-Jul 29 16:18:22.725: INFO: Pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df": Phase="Pending", Reason="", readiness=false. Elapsed: 13.050144ms
-Jul 29 16:18:24.732: INFO: Pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02024824s
-Jul 29 16:18:26.736: INFO: Pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024163275s
-STEP: Saw pod success 07/29/23 16:18:26.736
-Jul 29 16:18:26.737: INFO: Pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df" satisfied condition "Succeeded or Failed"
-Jul 29 16:18:26.743: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df container env-test:
-STEP: delete the pod 07/29/23 16:18:26.756
-Jul 29 16:18:26.785: INFO: Waiting for pod pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df to disappear
-Jul 29 16:18:26.791: INFO: Pod pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df no longer exists
-[AfterEach] [sig-node] ConfigMap
+[It] should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance]
+ test/e2e/common/node/containers.go:73
+STEP: Creating a pod to test override command 08/24/23 12:22:23.396
+Aug 24 12:22:23.424: INFO: Waiting up to 5m0s for pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0" in namespace "containers-643" to be "Succeeded or Failed"
+Aug 24 12:22:23.460: INFO: Pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0": Phase="Pending", Reason="", readiness=false. Elapsed: 35.688133ms
+Aug 24 12:22:25.469: INFO: Pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0": Phase="Pending", Reason="", readiness=false. Elapsed: 2.045301172s
+Aug 24 12:22:27.468: INFO: Pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.043870592s
+STEP: Saw pod success 08/24/23 12:22:27.468
+Aug 24 12:22:27.468: INFO: Pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0" satisfied condition "Succeeded or Failed"
+Aug 24 12:22:27.475: INFO: Trying to get logs from node pe9deep4seen-3 pod client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0 container agnhost-container:
+STEP: delete the pod 08/24/23 12:22:27.488
+Aug 24 12:22:27.512: INFO: Waiting for pod client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0 to disappear
+Aug 24 12:22:27.518: INFO: Pod client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0 no longer exists
+[AfterEach] [sig-node] Containers
test/e2e/framework/node/init/init.go:32
-Jul 29 16:18:26.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-node] ConfigMap
+Aug 24 12:22:27.519: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-node] Containers
test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-node] ConfigMap
+[DeferCleanup (Each)] [sig-node] Containers
dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-node] ConfigMap
+[DeferCleanup (Each)] [sig-node] Containers
tear down framework | framework.go:193
-STEP: Destroying namespace "configmap-3923" for this suite. 07/29/23 16:18:26.802
+STEP: Destroying namespace "containers-643" for this suite. 08/24/23 12:22:27.527
------------------------------
-• [4.222 seconds]
-[sig-node] ConfigMap
+• [4.218 seconds]
+[sig-node] Containers
test/e2e/common/node/framework.go:23
- should be consumable via environment variable [NodeConformance] [Conformance]
- test/e2e/common/node/configmap.go:45
+ should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance]
+ test/e2e/common/node/containers.go:73
Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-node] ConfigMap
+ [BeforeEach] [sig-node] Containers
  set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:18:22.595
- Jul 29 16:18:22.595: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename configmap 07/29/23 16:18:22.598
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:22.667
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:22.676
- [BeforeEach] [sig-node] ConfigMap
+ STEP: Creating a kubernetes client 08/24/23 12:22:23.326
+ Aug 24 12:22:23.326: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename containers 08/24/23 12:22:23.328
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:23.382
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:23.386
+ [BeforeEach] [sig-node] Containers
  test/e2e/framework/metrics/init/init.go:31
- [It] should be consumable via environment variable [NodeConformance] [Conformance]
- test/e2e/common/node/configmap.go:45
- STEP: Creating configMap configmap-3923/configmap-test-3b0e665e-918d-4a59-b8a0-26fe9553abf8 07/29/23 16:18:22.681
- STEP: Creating a pod to test consume configMaps 07/29/23 16:18:22.689
- Jul 29 16:18:22.711: INFO: Waiting up to 5m0s for pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df" in namespace "configmap-3923" to be "Succeeded or Failed"
- Jul 29 16:18:22.725: INFO: Pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df": Phase="Pending", Reason="", readiness=false. Elapsed: 13.050144ms
- Jul 29 16:18:24.732: INFO: Pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02024824s
- Jul 29 16:18:26.736: INFO: Pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024163275s
- STEP: Saw pod success 07/29/23 16:18:26.736
- Jul 29 16:18:26.737: INFO: Pod "pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df" satisfied condition "Succeeded or Failed"
- Jul 29 16:18:26.743: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df container env-test:
- STEP: delete the pod 07/29/23 16:18:26.756
- Jul 29 16:18:26.785: INFO: Waiting for pod pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df to disappear
- Jul 29 16:18:26.791: INFO: Pod pod-configmaps-fb42bc29-0c11-43a9-8288-60c1590f77df no longer exists
- [AfterEach] [sig-node] ConfigMap
+ [It] should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance]
+ test/e2e/common/node/containers.go:73
+ STEP: Creating a pod to test override command 08/24/23 12:22:23.396
+ Aug 24 12:22:23.424: INFO: Waiting up to 5m0s for pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0" in namespace "containers-643" to be "Succeeded or Failed"
+ Aug 24 12:22:23.460: INFO: Pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0": Phase="Pending", Reason="", readiness=false. Elapsed: 35.688133ms
+ Aug 24 12:22:25.469: INFO: Pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0": Phase="Pending", Reason="", readiness=false. Elapsed: 2.045301172s
+ Aug 24 12:22:27.468: INFO: Pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.043870592s
+ STEP: Saw pod success 08/24/23 12:22:27.468
+ Aug 24 12:22:27.468: INFO: Pod "client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0" satisfied condition "Succeeded or Failed"
+ Aug 24 12:22:27.475: INFO: Trying to get logs from node pe9deep4seen-3 pod client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0 container agnhost-container:
+ STEP: delete the pod 08/24/23 12:22:27.488
+ Aug 24 12:22:27.512: INFO: Waiting for pod client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0 to disappear
+ Aug 24 12:22:27.518: INFO: Pod client-containers-d8f6bfda-e314-41af-b722-3e3d0c925ca0 no longer exists
+ [AfterEach] [sig-node] Containers
  test/e2e/framework/node/init/init.go:32
- Jul 29 16:18:26.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-node] ConfigMap
+ Aug 24 12:22:27.519: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-node] Containers
  test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-node] ConfigMap
+ [DeferCleanup (Each)] [sig-node] Containers
  dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-node] ConfigMap
+ [DeferCleanup (Each)] [sig-node] Containers
  tear down framework | framework.go:193
- STEP: Destroying namespace "configmap-3923" for this suite. 07/29/23 16:18:26.802
+ STEP: Destroying namespace "containers-643" for this suite. 08/24/23 12:22:27.527
<< End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSS
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-network] Services
- should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]
- test/e2e/network/service.go:2213
+ should be able to change the type from NodePort to ExternalName [Conformance]
+ test/e2e/network/service.go:1557
[BeforeEach] [sig-network] Services
set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:18:26.822
-Jul 29 16:18:26.822: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename services 07/29/23 16:18:26.824
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:26.861
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:26.869
+STEP: Creating a kubernetes client 08/24/23 12:22:27.551
+Aug 24 12:22:27.551: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename services 08/24/23 12:22:27.554
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:27.586
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:27.593
[BeforeEach] [sig-network] Services
test/e2e/framework/metrics/init/init.go:31
[BeforeEach] [sig-network] Services
test/e2e/network/service.go:766
-[It] should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]
- test/e2e/network/service.go:2213
-STEP: creating service in namespace services-990 07/29/23 16:18:26.874
-STEP: creating service affinity-clusterip-transition in namespace services-990 07/29/23 16:18:26.874
-STEP: creating replication controller affinity-clusterip-transition in namespace services-990 07/29/23 16:18:26.891
-I0729 16:18:26.909656 13 runners.go:193] Created replication controller with name: affinity-clusterip-transition, namespace: services-990, replica count: 3
-I0729 16:18:29.976930 13 runners.go:193] affinity-clusterip-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
-Jul 29 16:18:29.989: INFO: Creating new exec pod
-Jul 29 16:18:30.001: INFO: Waiting up to 5m0s for pod "execpod-affinity8qvgd" in namespace "services-990" to be "running"
-Jul 29 16:18:30.006: INFO: Pod "execpod-affinity8qvgd": Phase="Pending", Reason="", readiness=false. Elapsed: 5.410456ms
-Jul 29 16:18:32.014: INFO: Pod "execpod-affinity8qvgd": Phase="Running", Reason="", readiness=true. Elapsed: 2.013482332s
-Jul 29 16:18:32.015: INFO: Pod "execpod-affinity8qvgd" satisfied condition "running"
-Jul 29 16:18:33.016: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-990 exec execpod-affinity8qvgd -- /bin/sh -x -c nc -v -z -w 2 affinity-clusterip-transition 80'
-Jul 29 16:18:33.292: INFO: stderr: "+ nc -v -z -w 2 affinity-clusterip-transition 80\nConnection to affinity-clusterip-transition 80 port [tcp/http] succeeded!\n"
-Jul 29 16:18:33.292: INFO: stdout: ""
-Jul 29 16:18:33.292: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-990 exec execpod-affinity8qvgd -- /bin/sh -x -c nc -v -z -w 2 10.233.6.27 80'
-Jul 29 16:18:33.556: INFO: stderr: "+ nc -v -z -w 2 10.233.6.27 80\nConnection to 10.233.6.27 80 port [tcp/http] succeeded!\n"
-Jul 29 16:18:33.556: INFO: stdout: ""
-Jul 29 16:18:33.574: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-990 exec execpod-affinity8qvgd -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.6.27:80/ ; done'
-Jul 29 16:18:34.079: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n"
-Jul 29 16:18:34.079: INFO: stdout: "\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-cl4ts\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-cl4ts\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p"
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-cl4ts
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-cl4ts
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.096: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-990 exec execpod-affinity8qvgd -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.6.27:80/ ; done'
-Jul 29 16:18:34.554: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n"
-Jul 29 16:18:34.554: INFO: stdout: "\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p"
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p
-Jul 29 16:18:34.554: INFO: Cleaning up the exec pod
-STEP: deleting ReplicationController affinity-clusterip-transition in namespace services-990, will wait for the garbage collector to delete the pods 07/29/23 16:18:34.579
-Jul 29 16:18:34.653: INFO: Deleting ReplicationController affinity-clusterip-transition took: 11.65475ms
-Jul 29 16:18:34.754: INFO: Terminating ReplicationController affinity-clusterip-transition pods took: 100.989347ms
+[It] should be able to change the type from NodePort to ExternalName [Conformance]
+ test/e2e/network/service.go:1557
+STEP: creating a service nodeport-service with the type=NodePort in namespace services-422 08/24/23 12:22:27.599
+STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service 08/24/23 12:22:27.631
+STEP: creating service externalsvc in namespace services-422 08/24/23 12:22:27.632
+STEP: creating replication controller externalsvc in namespace services-422 08/24/23 12:22:27.661
+I0824 12:22:27.678918 14 runners.go:193] Created replication controller with name: externalsvc, namespace: services-422, replica count: 2
+I0824 12:22:30.732804 14 runners.go:193] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
+STEP: changing the NodePort service to type=ExternalName 08/24/23 12:22:30.739
+Aug 24 12:22:30.779: INFO: Creating new exec pod
+Aug 24 12:22:30.798: INFO: Waiting up to 5m0s for pod "execpodrsqpl" in namespace "services-422" to be "running"
+Aug 24 12:22:30.809: INFO: Pod "execpodrsqpl": Phase="Pending", Reason="", readiness=false. Elapsed: 10.240906ms
+Aug 24 12:22:32.819: INFO: Pod "execpodrsqpl": Phase="Running", Reason="", readiness=true. Elapsed: 2.020851298s
+Aug 24 12:22:32.820: INFO: Pod "execpodrsqpl" satisfied condition "running"
+Aug 24 12:22:32.820: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-422 exec execpodrsqpl -- /bin/sh -x -c nslookup nodeport-service.services-422.svc.cluster.local'
+Aug 24 12:22:33.243: INFO: stderr: "+ nslookup nodeport-service.services-422.svc.cluster.local\n"
+Aug 24 12:22:33.243: INFO: stdout: "Server:\t\t10.233.0.10\nAddress:\t10.233.0.10#53\n\nnodeport-service.services-422.svc.cluster.local\tcanonical name = externalsvc.services-422.svc.cluster.local.\nName:\texternalsvc.services-422.svc.cluster.local\nAddress: 10.233.44.210\n\n"
+STEP: deleting ReplicationController externalsvc in namespace services-422, will wait for the garbage collector to delete the pods 08/24/23 12:22:33.243
+Aug 24 12:22:33.310: INFO: Deleting ReplicationController externalsvc took: 10.241701ms
+Aug 24 12:22:33.413: INFO: Terminating ReplicationController externalsvc pods took: 102.23208ms
+Aug 24 12:22:35.384: INFO: Cleaning up the NodePort to ExternalName test service
[AfterEach] [sig-network] Services
test/e2e/framework/node/init/init.go:32
-Jul 29 16:18:36.698: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+Aug 24 12:22:35.476: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[DeferCleanup (Each)] [sig-network] Services
test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-network] Services
dump namespaces | framework.go:196
[DeferCleanup (Each)] [sig-network] Services
tear down framework | framework.go:193
-STEP: Destroying namespace "services-990" for this suite. 07/29/23 16:18:36.725
+STEP: Destroying namespace "services-422" for this suite. 08/24/23 12:22:35.491
------------------------------
-• [SLOW TEST] [9.924 seconds]
+• [SLOW TEST] [8.016 seconds]
[sig-network] Services
test/e2e/network/common/framework.go:23
- should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]
- test/e2e/network/service.go:2213
+ should be able to change the type from NodePort to ExternalName [Conformance]
+ test/e2e/network/service.go:1557
Begin Captured GinkgoWriter Output >>
  [BeforeEach] [sig-network] Services
  set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:18:26.822
- Jul 29 16:18:26.822: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename services 07/29/23 16:18:26.824
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:26.861
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:26.869
+ STEP: Creating a kubernetes client 08/24/23 12:22:27.551
+ Aug 24 12:22:27.551: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename services 08/24/23 12:22:27.554
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:27.586
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:27.593
  [BeforeEach] [sig-network] Services
  test/e2e/framework/metrics/init/init.go:31
  [BeforeEach] [sig-network] Services
  test/e2e/network/service.go:766
- [It] should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]
- test/e2e/network/service.go:2213
- STEP: creating service in namespace services-990 07/29/23 16:18:26.874
- STEP: creating service affinity-clusterip-transition in namespace services-990 07/29/23 16:18:26.874
- STEP: creating replication controller affinity-clusterip-transition in namespace services-990 07/29/23 16:18:26.891
- I0729 16:18:26.909656 13 runners.go:193] Created replication controller with name: affinity-clusterip-transition, namespace: services-990, replica count: 3
- I0729 16:18:29.976930 13 runners.go:193] affinity-clusterip-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
- Jul 29 16:18:29.989: INFO: Creating new exec pod
- Jul 29 16:18:30.001: INFO: Waiting up to 5m0s for pod "execpod-affinity8qvgd" in namespace "services-990" to be "running"
- Jul 29 16:18:30.006: INFO: Pod "execpod-affinity8qvgd": Phase="Pending", Reason="", readiness=false. Elapsed: 5.410456ms
- Jul 29 16:18:32.014: INFO: Pod "execpod-affinity8qvgd": Phase="Running", Reason="", readiness=true. Elapsed: 2.013482332s
- Jul 29 16:18:32.015: INFO: Pod "execpod-affinity8qvgd" satisfied condition "running"
- Jul 29 16:18:33.016: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-990 exec execpod-affinity8qvgd -- /bin/sh -x -c nc -v -z -w 2 affinity-clusterip-transition 80'
- Jul 29 16:18:33.292: INFO: stderr: "+ nc -v -z -w 2 affinity-clusterip-transition 80\nConnection to affinity-clusterip-transition 80 port [tcp/http] succeeded!\n"
- Jul 29 16:18:33.292: INFO: stdout: ""
- Jul 29 16:18:33.292: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-990 exec execpod-affinity8qvgd -- /bin/sh -x -c nc -v -z -w 2 10.233.6.27 80'
- Jul 29 16:18:33.556: INFO: stderr: "+ nc -v -z -w 2 10.233.6.27 80\nConnection to 10.233.6.27 80 port [tcp/http] succeeded!\n"
- Jul 29 16:18:33.556: INFO: stdout: ""
- Jul 29 16:18:33.574: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-990 exec execpod-affinity8qvgd -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.6.27:80/ ; done'
- Jul 29 16:18:34.079: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n"
- Jul 29 16:18:34.079: INFO: stdout: "\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-cl4ts\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-cl4ts\naffinity-clusterip-transition-k6b7h\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p"
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-cl4ts
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-cl4ts
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-k6b7h
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
- Jul 29 16:18:34.079: INFO: Received response from host: affinity-clusterip-transition-p262p
- Jul 29 16:18:34.096: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-990 exec execpod-affinity8qvgd -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.6.27:80/ ; done'
- Jul 29 16:18:34.554: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.6.27:80/\n"
- Jul 29 16:18:34.554: INFO: stdout:
"\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p\naffinity-clusterip-transition-p262p" - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Received response from host: affinity-clusterip-transition-p262p - Jul 29 16:18:34.554: INFO: Cleaning up the exec pod - STEP: deleting ReplicationController affinity-clusterip-transition in namespace services-990, will wait for the garbage collector to delete the pods 07/29/23 16:18:34.579 - Jul 29 16:18:34.653: INFO: Deleting ReplicationController affinity-clusterip-transition took: 11.65475ms - Jul 29 16:18:34.754: INFO: Terminating ReplicationController affinity-clusterip-transition pods took: 100.989347ms + [It] should be able to change the type from NodePort to ExternalName [Conformance] + test/e2e/network/service.go:1557 + STEP: creating a service nodeport-service with the type=NodePort in namespace services-422 08/24/23 12:22:27.599 + STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service 08/24/23 12:22:27.631 + STEP: creating service externalsvc in namespace services-422 08/24/23 12:22:27.632 + STEP: creating replication controller externalsvc in namespace services-422 08/24/23 12:22:27.661 + I0824 12:22:27.678918 14 runners.go:193] Created replication controller with name: externalsvc, namespace: services-422, replica count: 2 + I0824 12:22:30.732804 14 runners.go:193] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + STEP: changing the NodePort service to type=ExternalName 08/24/23 12:22:30.739 + Aug 24 12:22:30.779: INFO: Creating new exec pod + 
Aug 24 12:22:30.798: INFO: Waiting up to 5m0s for pod "execpodrsqpl" in namespace "services-422" to be "running" + Aug 24 12:22:30.809: INFO: Pod "execpodrsqpl": Phase="Pending", Reason="", readiness=false. Elapsed: 10.240906ms + Aug 24 12:22:32.819: INFO: Pod "execpodrsqpl": Phase="Running", Reason="", readiness=true. Elapsed: 2.020851298s + Aug 24 12:22:32.820: INFO: Pod "execpodrsqpl" satisfied condition "running" + Aug 24 12:22:32.820: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-422 exec execpodrsqpl -- /bin/sh -x -c nslookup nodeport-service.services-422.svc.cluster.local' + Aug 24 12:22:33.243: INFO: stderr: "+ nslookup nodeport-service.services-422.svc.cluster.local\n" + Aug 24 12:22:33.243: INFO: stdout: "Server:\t\t10.233.0.10\nAddress:\t10.233.0.10#53\n\nnodeport-service.services-422.svc.cluster.local\tcanonical name = externalsvc.services-422.svc.cluster.local.\nName:\texternalsvc.services-422.svc.cluster.local\nAddress: 10.233.44.210\n\n" + STEP: deleting ReplicationController externalsvc in namespace services-422, will wait for the garbage collector to delete the pods 08/24/23 12:22:33.243 + Aug 24 12:22:33.310: INFO: Deleting ReplicationController externalsvc took: 10.241701ms + Aug 24 12:22:33.413: INFO: Terminating ReplicationController externalsvc pods took: 102.23208ms + Aug 24 12:22:35.384: INFO: Cleaning up the NodePort to ExternalName test service [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 16:18:36.698: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:22:35.476: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "services-990" for this suite. 07/29/23 16:18:36.725 + STEP: Destroying namespace "services-422" for this suite. 
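For anyone replaying this NodePort-to-ExternalName conformance check outside the harness, a rough kubectl equivalent follows. All namespaces, object names, and image tags below are illustrative, not taken from this run; the suite itself mutates the Service in place via the Go client, so delete-and-recreate is only the simple CLI analogue of the type flip.

```sh
kubectl create namespace svc-demo

# Backend the alias will eventually point at.
kubectl create deployment externalsvc \
  --image=registry.k8s.io/e2e-test-images/agnhost:2.43 \
  --namespace=svc-demo -- /agnhost serve-hostname --port=8080
kubectl expose deployment externalsvc --port=80 --target-port=8080 --namespace=svc-demo

# Start as NodePort, then swap to an ExternalName alias.
kubectl create service nodeport nodeport-service --tcp=80:8080 --namespace=svc-demo
kubectl delete service nodeport-service --namespace=svc-demo
kubectl create service externalname nodeport-service \
  --external-name=externalsvc.svc-demo.svc.cluster.local --namespace=svc-demo

# Verify DNS now answers with a CNAME to the backing service, as in the log above.
kubectl run execpod --namespace=svc-demo \
  --image=registry.k8s.io/e2e-test-images/agnhost:2.43 --restart=Never -- pause
kubectl wait pod/execpod --namespace=svc-demo --for=condition=Ready --timeout=2m
kubectl exec execpod --namespace=svc-demo -- \
  nslookup nodeport-service.svc-demo.svc.cluster.local
```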
08/24/23 12:22:35.491 << End Captured GinkgoWriter Output ------------------------------ -[sig-network] Services - should serve multiport endpoints from pods [Conformance] - test/e2e/network/service.go:848 -[BeforeEach] [sig-network] Services +SS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should include webhook resources in discovery documents [Conformance] + test/e2e/apimachinery/webhook.go:117 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:18:36.748 -Jul 29 16:18:36.748: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 16:18:36.751 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:36.792 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:36.799 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 12:22:35.57 +Aug 24 12:22:35.571: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 12:22:35.575 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:35.62 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:35.626 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should serve multiport endpoints from pods [Conformance] - test/e2e/network/service.go:848 -STEP: creating service multi-endpoint-test in namespace services-3634 07/29/23 16:18:36.805 -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[] 07/29/23 16:18:36.831 -Jul 29 16:18:36.854: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[] -STEP: Creating pod pod1 in namespace services-3634 07/29/23 16:18:36.854 -Jul 29 16:18:36.872: INFO: Waiting up to 5m0s for pod "pod1" in namespace "services-3634" to be "running and ready" -Jul 29 16:18:36.882: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 9.520092ms -Jul 29 16:18:36.882: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:18:38.906: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.034130398s -Jul 29 16:18:38.906: INFO: The phase of Pod pod1 is Running (Ready = true) -Jul 29 16:18:38.906: INFO: Pod "pod1" satisfied condition "running and ready" -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[pod1:[100]] 07/29/23 16:18:38.937 -Jul 29 16:18:38.964: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[pod1:[100]] -STEP: Creating pod pod2 in namespace services-3634 07/29/23 16:18:38.965 -Jul 29 16:18:38.974: INFO: Waiting up to 5m0s for pod "pod2" in namespace "services-3634" to be "running and ready" -Jul 29 16:18:38.980: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 5.97028ms -Jul 29 16:18:38.980: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:18:40.990: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01608482s -Jul 29 16:18:40.990: INFO: The phase of Pod pod2 is Running (Ready = true) -Jul 29 16:18:40.991: INFO: Pod "pod2" satisfied condition "running and ready" -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[pod1:[100] pod2:[101]] 07/29/23 16:18:40.998 -Jul 29 16:18:41.024: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[pod1:[100] pod2:[101]] -STEP: Checking if the Service forwards traffic to pods 07/29/23 16:18:41.024 -Jul 29 16:18:41.025: INFO: Creating new exec pod -Jul 29 16:18:41.038: INFO: Waiting up to 5m0s for pod "execpodgzq22" in namespace "services-3634" to be "running" -Jul 29 16:18:41.045: INFO: Pod "execpodgzq22": Phase="Pending", Reason="", readiness=false. Elapsed: 7.45965ms -Jul 29 16:18:43.052: INFO: Pod "execpodgzq22": Phase="Running", Reason="", readiness=true. Elapsed: 2.014465883s -Jul 29 16:18:43.053: INFO: Pod "execpodgzq22" satisfied condition "running" -Jul 29 16:18:44.054: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3634 exec execpodgzq22 -- /bin/sh -x -c nc -v -z -w 2 multi-endpoint-test 80' -Jul 29 16:18:44.340: INFO: stderr: "+ nc -v -z -w 2 multi-endpoint-test 80\nConnection to multi-endpoint-test 80 port [tcp/http] succeeded!\n" -Jul 29 16:18:44.340: INFO: stdout: "" -Jul 29 16:18:44.341: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3634 exec execpodgzq22 -- /bin/sh -x -c nc -v -z -w 2 10.233.48.16 80' -Jul 29 16:18:44.613: INFO: stderr: "+ nc -v -z -w 2 10.233.48.16 80\nConnection to 10.233.48.16 80 port [tcp/http] succeeded!\n" -Jul 29 16:18:44.613: INFO: stdout: "" -Jul 29 16:18:44.614: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3634 exec execpodgzq22 -- /bin/sh -x -c nc -v -z -w 2 multi-endpoint-test 81' -Jul 29 16:18:44.830: INFO: stderr: "+ nc -v -z -w 2 multi-endpoint-test 81\nConnection to multi-endpoint-test 81 port [tcp/*] succeeded!\n" -Jul 29 16:18:44.830: INFO: stdout: "" -Jul 29 16:18:44.830: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3634 exec execpodgzq22 -- /bin/sh -x -c nc -v -z -w 2 10.233.48.16 81' -Jul 29 16:18:45.088: INFO: stderr: "+ nc -v -z -w 2 10.233.48.16 81\nConnection to 10.233.48.16 81 port [tcp/*] succeeded!\n" -Jul 29 16:18:45.088: INFO: stdout: "" -STEP: Deleting pod pod1 in namespace services-3634 07/29/23 16:18:45.088 -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[pod2:[101]] 07/29/23 16:18:45.113 -Jul 29 16:18:45.145: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[pod2:[101]] -STEP: Deleting pod pod2 in namespace services-3634 07/29/23 16:18:45.146 -STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[] 07/29/23 16:18:45.23 -Jul 29 16:18:45.269: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[] -[AfterEach] [sig-network] Services +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 12:22:35.654 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:22:37.013 +STEP: Deploying the webhook pod 
08/24/23 12:22:37.022 +STEP: Wait for the deployment to be ready 08/24/23 12:22:37.044 +Aug 24 12:22:37.062: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service 08/24/23 12:22:39.119 +STEP: Verifying the service has paired with the endpoint 08/24/23 12:22:39.147 +Aug 24 12:22:40.148: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should include webhook resources in discovery documents [Conformance] + test/e2e/apimachinery/webhook.go:117 +STEP: fetching the /apis discovery document 08/24/23 12:22:40.157 +STEP: finding the admissionregistration.k8s.io API group in the /apis discovery document 08/24/23 12:22:40.161 +STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis discovery document 08/24/23 12:22:40.161 +STEP: fetching the /apis/admissionregistration.k8s.io discovery document 08/24/23 12:22:40.162 +STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis/admissionregistration.k8s.io discovery document 08/24/23 12:22:40.166 +STEP: fetching the /apis/admissionregistration.k8s.io/v1 discovery document 08/24/23 12:22:40.167 +STEP: finding mutatingwebhookconfigurations and validatingwebhookconfigurations resources in the /apis/admissionregistration.k8s.io/v1 discovery document 08/24/23 12:22:40.169 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/node/init/init.go:32 +Aug 24 12:22:40.169: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + tear down framework | framework.go:193 +STEP: Destroying namespace "webhook-8977" for this suite. 08/24/23 12:22:40.252 +STEP: Destroying namespace "webhook-8977-markers" for this suite. 
08/24/23 12:22:40.275 +------------------------------ +• [4.730 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should include webhook resources in discovery documents [Conformance] + test/e2e/apimachinery/webhook.go:117 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:22:35.57 + Aug 24 12:22:35.571: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 12:22:35.575 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:35.62 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:35.626 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 12:22:35.654 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:22:37.013 + STEP: Deploying the webhook pod 08/24/23 12:22:37.022 + STEP: Wait for the deployment to be ready 08/24/23 12:22:37.044 + Aug 24 12:22:37.062: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set + STEP: Deploying the webhook service 08/24/23 12:22:39.119 + STEP: Verifying the service has paired with the endpoint 08/24/23 12:22:39.147 + Aug 24 12:22:40.148: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should include webhook resources in discovery documents [Conformance] + test/e2e/apimachinery/webhook.go:117 + STEP: fetching the /apis discovery document 08/24/23 12:22:40.157 + STEP: finding the admissionregistration.k8s.io API group in the /apis discovery document 08/24/23 12:22:40.161 + STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis discovery document 08/24/23 12:22:40.161 + STEP: fetching the /apis/admissionregistration.k8s.io discovery document 08/24/23 12:22:40.162 + STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis/admissionregistration.k8s.io discovery document 08/24/23 12:22:40.166 + STEP: fetching the /apis/admissionregistration.k8s.io/v1 discovery document 08/24/23 12:22:40.167 + STEP: finding mutatingwebhookconfigurations and validatingwebhookconfigurations resources in the /apis/admissionregistration.k8s.io/v1 discovery document 08/24/23 12:22:40.169 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/node/init/init.go:32 + Aug 24 12:22:40.169: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + tear down framework | framework.go:193 + STEP: Destroying namespace "webhook-8977" for this suite. 08/24/23 12:22:40.252 + STEP: Destroying namespace "webhook-8977-markers" for this suite. 
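The discovery-document checks in the webhook spec above can be reproduced by hand with plain kubectl against the same endpoints the test fetches; nothing below is specific to this run.

```sh
kubectl get --raw /apis                                  # top-level group list
kubectl get --raw /apis/admissionregistration.k8s.io     # group document
# The v1 resource list should name both webhook configuration resources.
kubectl get --raw /apis/admissionregistration.k8s.io/v1 \
  | grep -o '"name":"[a-z]*webhookconfigurations"'
kubectl api-resources --api-group=admissionregistration.k8s.io
```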
08/24/23 12:22:40.275 + << End Captured GinkgoWriter Output +------------------------------ +SSSS +------------------------------ +[sig-node] RuntimeClass + should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:129 +[BeforeEach] [sig-node] RuntimeClass + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:22:40.303 +Aug 24 12:22:40.304: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename runtimeclass 08/24/23 12:22:40.308 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:40.359 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:40.369 +[BeforeEach] [sig-node] RuntimeClass + test/e2e/framework/metrics/init/init.go:31 +[It] should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:129 +Aug 24 12:22:40.408: INFO: Waiting up to 1m20s for at least 1 pods in namespace runtimeclass-4387 to be scheduled +Aug 24 12:22:40.419: INFO: 1 pods are not scheduled: [runtimeclass-4387/test-runtimeclass-runtimeclass-4387-preconfigured-handler-2kvvr(d6f39fd1-77f5-4c72-8305-d951cc0153db)] +[AfterEach] [sig-node] RuntimeClass + test/e2e/framework/node/init/init.go:32 +Aug 24 12:22:42.443: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] RuntimeClass + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-node] RuntimeClass + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-node] RuntimeClass + tear down framework | framework.go:193 +STEP: Destroying namespace "runtimeclass-4387" for this suite. 
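A minimal sketch of the RuntimeClass overhead behaviour this spec exercises: a RuntimeClass declares a fixed per-pod overhead, and admission copies it into any pod that requests that class. The handler must actually exist on the node for the pod to start; the names below are illustrative.

```sh
cat <<'EOF' | kubectl apply -f -
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: overhead-demo
handler: runc
overhead:
  podFixed:
    cpu: 10m
    memory: 10Mi
EOF

cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: overhead-demo-pod
spec:
  runtimeClassName: overhead-demo
  containers:
  - name: pause
    image: registry.k8s.io/pause:3.9
EOF

# RuntimeClass admission initializes the overhead on the pod spec at creation time.
kubectl get pod overhead-demo-pod -o jsonpath='{.spec.overhead}'
```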
08/24/23 12:22:42.455 +------------------------------ +• [2.167 seconds] +[sig-node] RuntimeClass +test/e2e/common/node/framework.go:23 + should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:129 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-node] RuntimeClass + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:22:40.303 + Aug 24 12:22:40.304: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename runtimeclass 08/24/23 12:22:40.308 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:40.359 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:40.369 + [BeforeEach] [sig-node] RuntimeClass + test/e2e/framework/metrics/init/init.go:31 + [It] should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:129 + Aug 24 12:22:40.408: INFO: Waiting up to 1m20s for at least 1 pods in namespace runtimeclass-4387 to be scheduled + Aug 24 12:22:40.419: INFO: 1 pods are not scheduled: [runtimeclass-4387/test-runtimeclass-runtimeclass-4387-preconfigured-handler-2kvvr(d6f39fd1-77f5-4c72-8305-d951cc0153db)] + [AfterEach] [sig-node] RuntimeClass + test/e2e/framework/node/init/init.go:32 + Aug 24 12:22:42.443: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] RuntimeClass + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-node] RuntimeClass + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-node] RuntimeClass + tear down framework | framework.go:193 + STEP: Destroying namespace "runtimeclass-4387" for this suite. 08/24/23 12:22:42.455 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:215 +[BeforeEach] [sig-storage] Projected secret + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:22:42.48 +Aug 24 12:22:42.480: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:22:42.483 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:42.516 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:42.519 +[BeforeEach] [sig-storage] Projected secret + test/e2e/framework/metrics/init/init.go:31 +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:215 +STEP: Creating secret with name s-test-opt-del-938ec7f2-cc01-43fd-87a6-762acad423ee 08/24/23 12:22:42.532 +STEP: Creating secret with name s-test-opt-upd-a0fe756a-500b-44f6-ad05-49b20e651f8d 08/24/23 12:22:42.544 +STEP: Creating the pod 08/24/23 12:22:42.556 +Aug 24 12:22:42.575: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27" in namespace "projected-7330" to be "running and ready" +Aug 24 12:22:42.589: INFO: Pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27": Phase="Pending", Reason="", readiness=false. 
Elapsed: 13.79241ms +Aug 24 12:22:42.589: INFO: The phase of Pod pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:22:44.596: INFO: Pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020304127s +Aug 24 12:22:44.596: INFO: The phase of Pod pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:22:46.599: INFO: Pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27": Phase="Running", Reason="", readiness=true. Elapsed: 4.023798649s +Aug 24 12:22:46.599: INFO: The phase of Pod pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27 is Running (Ready = true) +Aug 24 12:22:46.600: INFO: Pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27" satisfied condition "running and ready" +STEP: Deleting secret s-test-opt-del-938ec7f2-cc01-43fd-87a6-762acad423ee 08/24/23 12:22:46.643 +STEP: Updating secret s-test-opt-upd-a0fe756a-500b-44f6-ad05-49b20e651f8d 08/24/23 12:22:46.655 +STEP: Creating secret with name s-test-opt-create-f747ad0c-1ee1-4f5a-a638-253222cd3ea0 08/24/23 12:22:46.668 +STEP: waiting to observe update in volume 08/24/23 12:22:46.68 +[AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 -Jul 29 16:18:45.345: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 12:23:55.366: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 -STEP: Destroying namespace "services-3634" for this suite. 07/29/23 16:18:45.358 +STEP: Destroying namespace "projected-7330" for this suite. 
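A hand-rolled analogue of the optional-secret projection tested above: the pod mounts a projected volume whose secret source does not exist yet (`optional: true`), and the kubelet surfaces later creates/updates/deletes of the secret in the mounted files on its next sync, which is why the spec waits roughly a minute to observe the update. Names and image below are illustrative.

```sh
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: projected-optional-demo
spec:
  containers:
  - name: reader
    image: busybox:1.36
    command: ["sh", "-c", "while true; do ls /etc/projected; sleep 5; done"]
    volumeMounts:
    - name: projected
      mountPath: /etc/projected
  volumes:
  - name: projected
    projected:
      sources:
      - secret:
          name: created-later
          optional: true
EOF

# Create the secret afterwards and watch the volume pick it up.
kubectl create secret generic created-later --from-literal=data-1=value-1
kubectl logs projected-optional-demo -f
```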
08/24/23 12:23:55.379 ------------------------------ -• [SLOW TEST] [8.628 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should serve multiport endpoints from pods [Conformance] - test/e2e/network/service.go:848 +• [SLOW TEST] [72.918 seconds] +[sig-storage] Projected secret +test/e2e/common/storage/framework.go:23 + optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:215 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:18:36.748 - Jul 29 16:18:36.748: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 16:18:36.751 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:36.792 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:36.799 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 12:22:42.48 + Aug 24 12:22:42.480: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:22:42.483 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:22:42.516 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:22:42.519 + [BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should serve multiport endpoints from pods [Conformance] - test/e2e/network/service.go:848 - STEP: creating service multi-endpoint-test in namespace services-3634 07/29/23 16:18:36.805 - STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[] 07/29/23 16:18:36.831 - Jul 29 16:18:36.854: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[] - STEP: Creating pod pod1 in namespace services-3634 07/29/23 16:18:36.854 - Jul 29 16:18:36.872: INFO: Waiting up to 5m0s for pod "pod1" in namespace "services-3634" to be "running and ready" - Jul 29 16:18:36.882: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 9.520092ms - Jul 29 16:18:36.882: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:18:38.906: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.034130398s - Jul 29 16:18:38.906: INFO: The phase of Pod pod1 is Running (Ready = true) - Jul 29 16:18:38.906: INFO: Pod "pod1" satisfied condition "running and ready" - STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[pod1:[100]] 07/29/23 16:18:38.937 - Jul 29 16:18:38.964: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[pod1:[100]] - STEP: Creating pod pod2 in namespace services-3634 07/29/23 16:18:38.965 - Jul 29 16:18:38.974: INFO: Waiting up to 5m0s for pod "pod2" in namespace "services-3634" to be "running and ready" - Jul 29 16:18:38.980: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.97028ms - Jul 29 16:18:38.980: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:18:40.990: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. Elapsed: 2.01608482s - Jul 29 16:18:40.990: INFO: The phase of Pod pod2 is Running (Ready = true) - Jul 29 16:18:40.991: INFO: Pod "pod2" satisfied condition "running and ready" - STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[pod1:[100] pod2:[101]] 07/29/23 16:18:40.998 - Jul 29 16:18:41.024: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[pod1:[100] pod2:[101]] - STEP: Checking if the Service forwards traffic to pods 07/29/23 16:18:41.024 - Jul 29 16:18:41.025: INFO: Creating new exec pod - Jul 29 16:18:41.038: INFO: Waiting up to 5m0s for pod "execpodgzq22" in namespace "services-3634" to be "running" - Jul 29 16:18:41.045: INFO: Pod "execpodgzq22": Phase="Pending", Reason="", readiness=false. Elapsed: 7.45965ms - Jul 29 16:18:43.052: INFO: Pod "execpodgzq22": Phase="Running", Reason="", readiness=true. Elapsed: 2.014465883s - Jul 29 16:18:43.053: INFO: Pod "execpodgzq22" satisfied condition "running" - Jul 29 16:18:44.054: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3634 exec execpodgzq22 -- /bin/sh -x -c nc -v -z -w 2 multi-endpoint-test 80' - Jul 29 16:18:44.340: INFO: stderr: "+ nc -v -z -w 2 multi-endpoint-test 80\nConnection to multi-endpoint-test 80 port [tcp/http] succeeded!\n" - Jul 29 16:18:44.340: INFO: stdout: "" - Jul 29 16:18:44.341: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3634 exec execpodgzq22 -- /bin/sh -x -c nc -v -z -w 2 10.233.48.16 80' - Jul 29 16:18:44.613: INFO: stderr: "+ nc -v -z -w 2 10.233.48.16 80\nConnection to 10.233.48.16 80 port [tcp/http] succeeded!\n" - Jul 29 16:18:44.613: INFO: stdout: "" - Jul 29 16:18:44.614: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3634 exec execpodgzq22 -- /bin/sh -x -c nc -v -z -w 2 multi-endpoint-test 81' - Jul 29 16:18:44.830: INFO: stderr: "+ nc -v -z -w 2 multi-endpoint-test 81\nConnection to multi-endpoint-test 81 port [tcp/*] succeeded!\n" - Jul 29 16:18:44.830: INFO: stdout: "" - Jul 29 16:18:44.830: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3634 exec execpodgzq22 -- /bin/sh -x -c nc -v -z -w 2 10.233.48.16 81' - Jul 29 16:18:45.088: INFO: stderr: "+ nc -v -z -w 2 10.233.48.16 81\nConnection to 10.233.48.16 81 port [tcp/*] succeeded!\n" - Jul 29 16:18:45.088: INFO: stdout: "" - STEP: Deleting pod pod1 in namespace services-3634 07/29/23 16:18:45.088 - STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[pod2:[101]] 07/29/23 16:18:45.113 - Jul 29 16:18:45.145: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[pod2:[101]] - STEP: Deleting pod pod2 in namespace services-3634 07/29/23 16:18:45.146 - STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-3634 to expose endpoints map[] 07/29/23 16:18:45.23 - Jul 29 16:18:45.269: INFO: successfully validated that service multi-endpoint-test in namespace services-3634 exposes endpoints map[] - [AfterEach] [sig-network] Services + [It] optional updates should be reflected in volume 
[NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:215 + STEP: Creating secret with name s-test-opt-del-938ec7f2-cc01-43fd-87a6-762acad423ee 08/24/23 12:22:42.532 + STEP: Creating secret with name s-test-opt-upd-a0fe756a-500b-44f6-ad05-49b20e651f8d 08/24/23 12:22:42.544 + STEP: Creating the pod 08/24/23 12:22:42.556 + Aug 24 12:22:42.575: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27" in namespace "projected-7330" to be "running and ready" + Aug 24 12:22:42.589: INFO: Pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27": Phase="Pending", Reason="", readiness=false. Elapsed: 13.79241ms + Aug 24 12:22:42.589: INFO: The phase of Pod pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:22:44.596: INFO: Pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020304127s + Aug 24 12:22:44.596: INFO: The phase of Pod pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:22:46.599: INFO: Pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27": Phase="Running", Reason="", readiness=true. Elapsed: 4.023798649s + Aug 24 12:22:46.599: INFO: The phase of Pod pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27 is Running (Ready = true) + Aug 24 12:22:46.600: INFO: Pod "pod-projected-secrets-25974e37-07a6-4a5c-9590-023d306bfd27" satisfied condition "running and ready" + STEP: Deleting secret s-test-opt-del-938ec7f2-cc01-43fd-87a6-762acad423ee 08/24/23 12:22:46.643 + STEP: Updating secret s-test-opt-upd-a0fe756a-500b-44f6-ad05-49b20e651f8d 08/24/23 12:22:46.655 + STEP: Creating secret with name s-test-opt-create-f747ad0c-1ee1-4f5a-a638-253222cd3ea0 08/24/23 12:22:46.668 + STEP: waiting to observe update in volume 08/24/23 12:22:46.68 + [AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 - Jul 29 16:18:45.345: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 12:23:55.366: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 - STEP: Destroying namespace "services-3634" for this suite. 07/29/23 16:18:45.358 + STEP: Destroying namespace "projected-7330" for this suite. 
08/24/23 12:23:55.379 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSS +SSSSSSSSS ------------------------------ [sig-storage] Projected secret - optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:215 + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:88 [BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:18:45.392 -Jul 29 16:18:45.392: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:18:45.397 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:45.44 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:45.445 +STEP: Creating a kubernetes client 08/24/23 12:23:55.399 +Aug 24 12:23:55.400: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:23:55.405 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:23:55.448 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:23:55.454 [BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 -[It] optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:215 -STEP: Creating secret with name s-test-opt-del-039b115b-8b57-4eeb-b10b-78c3b8d49619 07/29/23 16:18:45.458 -STEP: Creating secret with name s-test-opt-upd-33f490e2-f058-4684-8e87-6bf9c27d8854 07/29/23 16:18:45.467 -STEP: Creating the pod 07/29/23 16:18:45.476 -Jul 29 16:18:45.501: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99" in namespace "projected-9395" to be "running and ready" -Jul 29 16:18:45.508: INFO: Pod "pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99": Phase="Pending", Reason="", readiness=false. Elapsed: 7.3188ms -Jul 29 16:18:45.509: INFO: The phase of Pod pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:18:47.519: INFO: Pod "pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.017765379s -Jul 29 16:18:47.519: INFO: The phase of Pod pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99 is Running (Ready = true) -Jul 29 16:18:47.519: INFO: Pod "pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99" satisfied condition "running and ready" -STEP: Deleting secret s-test-opt-del-039b115b-8b57-4eeb-b10b-78c3b8d49619 07/29/23 16:18:47.564 -STEP: Updating secret s-test-opt-upd-33f490e2-f058-4684-8e87-6bf9c27d8854 07/29/23 16:18:47.575 -STEP: Creating secret with name s-test-opt-create-110fa739-0e17-404b-9cbc-2baedcd8042b 07/29/23 16:18:47.585 -STEP: waiting to observe update in volume 07/29/23 16:18:47.593 +[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:88 +STEP: Creating projection with secret that has name projected-secret-test-map-0f2edd49-cd0e-4f35-b8c8-9689e2d30b0f 08/24/23 12:23:55.46 +STEP: Creating a pod to test consume secrets 08/24/23 12:23:55.471 +Aug 24 12:23:55.489: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1" in namespace "projected-1911" to be "Succeeded or Failed" +Aug 24 12:23:55.500: INFO: Pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1": Phase="Pending", Reason="", readiness=false. Elapsed: 10.822068ms +Aug 24 12:23:57.508: INFO: Pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018408687s +Aug 24 12:23:59.509: INFO: Pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019373657s +STEP: Saw pod success 08/24/23 12:23:59.509 +Aug 24 12:23:59.509: INFO: Pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1" satisfied condition "Succeeded or Failed" +Aug 24 12:23:59.515: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1 container projected-secret-volume-test: +STEP: delete the pod 08/24/23 12:23:59.528 +Aug 24 12:23:59.554: INFO: Waiting for pod pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1 to disappear +Aug 24 12:23:59.560: INFO: Pod pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1 no longer exists [AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 -Jul 29 16:18:49.644: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:23:59.560: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 -STEP: Destroying namespace "projected-9395" for this suite. 07/29/23 16:18:49.653 +STEP: Destroying namespace "projected-1911" for this suite. 
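The mappings/item-mode variant above can be approximated as follows: one secret key is remapped to a new path with mode 0400, then read back from a pod that runs to completion. All names are illustrative, and note that `ls -l` in a projected volume shows symlink permissions; the mode applies to the underlying file.

```sh
kubectl create secret generic mode-demo --from-literal=data-1=value-1

cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: projected-mode-demo
spec:
  restartPolicy: Never
  containers:
  - name: check
    image: busybox:1.36
    command: ["sh", "-c", "ls -l /etc/projected && cat /etc/projected/new-path-data-1"]
    volumeMounts:
    - name: projected
      mountPath: /etc/projected
  volumes:
  - name: projected
    projected:
      sources:
      - secret:
          name: mode-demo
          items:
          - key: data-1
            path: new-path-data-1
            mode: 0400
EOF

kubectl wait pod/projected-mode-demo --for=jsonpath='{.status.phase}'=Succeeded --timeout=2m
kubectl logs projected-mode-demo
```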
08/24/23 12:23:59.57 ------------------------------ -• [4.275 seconds] +• [4.193 seconds] [sig-storage] Projected secret test/e2e/common/storage/framework.go:23 - optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:215 + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:88 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:18:45.392 - Jul 29 16:18:45.392: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:18:45.397 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:45.44 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:45.445 + STEP: Creating a kubernetes client 08/24/23 12:23:55.399 + Aug 24 12:23:55.400: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:23:55.405 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:23:55.448 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:23:55.454 [BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 - [It] optional updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:215 - STEP: Creating secret with name s-test-opt-del-039b115b-8b57-4eeb-b10b-78c3b8d49619 07/29/23 16:18:45.458 - STEP: Creating secret with name s-test-opt-upd-33f490e2-f058-4684-8e87-6bf9c27d8854 07/29/23 16:18:45.467 - STEP: Creating the pod 07/29/23 16:18:45.476 - Jul 29 16:18:45.501: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99" in namespace "projected-9395" to be "running and ready" - Jul 29 16:18:45.508: INFO: Pod "pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99": Phase="Pending", Reason="", readiness=false. Elapsed: 7.3188ms - Jul 29 16:18:45.509: INFO: The phase of Pod pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:18:47.519: INFO: Pod "pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.017765379s - Jul 29 16:18:47.519: INFO: The phase of Pod pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99 is Running (Ready = true) - Jul 29 16:18:47.519: INFO: Pod "pod-projected-secrets-b3ccd31f-0c93-4434-b72a-ba323c1bef99" satisfied condition "running and ready" - STEP: Deleting secret s-test-opt-del-039b115b-8b57-4eeb-b10b-78c3b8d49619 07/29/23 16:18:47.564 - STEP: Updating secret s-test-opt-upd-33f490e2-f058-4684-8e87-6bf9c27d8854 07/29/23 16:18:47.575 - STEP: Creating secret with name s-test-opt-create-110fa739-0e17-404b-9cbc-2baedcd8042b 07/29/23 16:18:47.585 - STEP: waiting to observe update in volume 07/29/23 16:18:47.593 + [It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:88 + STEP: Creating projection with secret that has name projected-secret-test-map-0f2edd49-cd0e-4f35-b8c8-9689e2d30b0f 08/24/23 12:23:55.46 + STEP: Creating a pod to test consume secrets 08/24/23 12:23:55.471 + Aug 24 12:23:55.489: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1" in namespace "projected-1911" to be "Succeeded or Failed" + Aug 24 12:23:55.500: INFO: Pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1": Phase="Pending", Reason="", readiness=false. Elapsed: 10.822068ms + Aug 24 12:23:57.508: INFO: Pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018408687s + Aug 24 12:23:59.509: INFO: Pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019373657s + STEP: Saw pod success 08/24/23 12:23:59.509 + Aug 24 12:23:59.509: INFO: Pod "pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1" satisfied condition "Succeeded or Failed" + Aug 24 12:23:59.515: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1 container projected-secret-volume-test: + STEP: delete the pod 08/24/23 12:23:59.528 + Aug 24 12:23:59.554: INFO: Waiting for pod pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1 to disappear + Aug 24 12:23:59.560: INFO: Pod pod-projected-secrets-1e03aaa4-29e0-46bc-b02e-ea445fffc5e1 no longer exists [AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 - Jul 29 16:18:49.644: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:23:59.560: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 - STEP: Destroying namespace "projected-9395" for this suite. 07/29/23 16:18:49.653 + STEP: Destroying namespace "projected-1911" for this suite. 
08/24/23 12:23:59.57 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl api-versions - should check if v1 is in available api versions [Conformance] - test/e2e/kubectl/kubectl.go:824 -[BeforeEach] [sig-cli] Kubectl client +[sig-node] Pods + should run through the lifecycle of Pods and PodStatus [Conformance] + test/e2e/common/node/pods.go:896 +[BeforeEach] [sig-node] Pods set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:18:49.673 -Jul 29 16:18:49.673: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:18:49.676 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:49.705 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:49.709 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 12:23:59.597 +Aug 24 12:23:59.597: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 12:23:59.599 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:23:59.685 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:23:59.691 +[BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[It] should check if v1 is in available api versions [Conformance] - test/e2e/kubectl/kubectl.go:824 -STEP: validating api versions 07/29/23 16:18:49.714 -Jul 29 16:18:49.714: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3034 api-versions' -Jul 29 16:18:49.932: INFO: stderr: "" -Jul 29 16:18:49.932: INFO: stdout: "admissionregistration.k8s.io/v1\napiextensions.k8s.io/v1\napiregistration.k8s.io/v1\napps/v1\nauthentication.k8s.io/v1\nauthorization.k8s.io/v1\nautoscaling/v1\nautoscaling/v2\nbatch/v1\ncertificates.k8s.io/v1\ncilium.io/v2\ncilium.io/v2alpha1\ncoordination.k8s.io/v1\ndiscovery.k8s.io/v1\nevents.k8s.io/v1\nflowcontrol.apiserver.k8s.io/v1beta2\nflowcontrol.apiserver.k8s.io/v1beta3\nnetworking.k8s.io/v1\nnode.k8s.io/v1\npolicy/v1\nrbac.authorization.k8s.io/v1\nscheduling.k8s.io/v1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" -[AfterEach] [sig-cli] Kubectl client +[BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 +[It] should run through the lifecycle of Pods and PodStatus [Conformance] + test/e2e/common/node/pods.go:896 +STEP: creating a Pod with a static label 08/24/23 12:23:59.711 +STEP: watching for Pod to be ready 08/24/23 12:23:59.734 +Aug 24 12:23:59.737: INFO: observed Pod pod-test in namespace pods-4062 in phase Pending with labels: map[test-pod-static:true] & conditions [] +Aug 24 12:23:59.743: INFO: observed Pod pod-test in namespace pods-4062 in phase Pending with labels: map[test-pod-static:true] & conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC }] +Aug 24 12:23:59.765: INFO: observed Pod pod-test in namespace pods-4062 in phase Pending with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 
UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC }] +Aug 24 12:24:01.337: INFO: Found Pod pod-test in namespace pods-4062 in phase Running with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:24:01 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:24:01 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC }] +STEP: patching the Pod with a new Label and updated data 08/24/23 12:24:01.342 +STEP: getting the Pod and ensuring that it's patched 08/24/23 12:24:01.367 +STEP: replacing the Pod's status Ready condition to False 08/24/23 12:24:01.375 +STEP: check the Pod again to ensure its Ready conditions are False 08/24/23 12:24:01.396 +STEP: deleting the Pod via a Collection with a LabelSelector 08/24/23 12:24:01.397 +STEP: watching for the Pod to be deleted 08/24/23 12:24:01.414 +Aug 24 12:24:01.418: INFO: observed event type MODIFIED +Aug 24 12:24:03.358: INFO: observed event type MODIFIED +Aug 24 12:24:04.371: INFO: observed event type MODIFIED +Aug 24 12:24:04.384: INFO: observed event type MODIFIED +[AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 -Jul 29 16:18:49.934: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 12:24:04.403: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-3034" for this suite. 07/29/23 16:18:49.945 +STEP: Destroying namespace "pods-4062" for this suite. 
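A CLI walk-through of the same pod lifecycle: create a labelled pod, wait for Ready, patch in a second label, then delete via a collection with a LabelSelector exactly as the spec does. Names and image below are illustrative, not from this run.

```sh
kubectl run pod-test --image=busybox:1.36 --restart=Never \
  --labels=test-pod-static=true -- sleep 3600
kubectl wait pod/pod-test --for=condition=Ready --timeout=2m

# Patch the pod with a new label, then verify.
kubectl label pod pod-test patched=true
kubectl get pod pod-test --show-labels

# Delete by collection + label selector, as in the test.
kubectl delete pods -l test-pod-static=true --wait=true
```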
08/24/23 12:24:04.413 ------------------------------ -• [0.285 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Kubectl api-versions - test/e2e/kubectl/kubectl.go:818 - should check if v1 is in available api versions [Conformance] - test/e2e/kubectl/kubectl.go:824 +• [4.826 seconds] +[sig-node] Pods +test/e2e/common/node/framework.go:23 + should run through the lifecycle of Pods and PodStatus [Conformance] + test/e2e/common/node/pods.go:896 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-node] Pods set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:18:49.673 - Jul 29 16:18:49.673: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 16:18:49.676 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:49.705 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:49.709 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 12:23:59.597 + Aug 24 12:23:59.597: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 12:23:59.599 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:23:59.685 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:23:59.691 + [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [It] should check if v1 is in available api versions [Conformance] - test/e2e/kubectl/kubectl.go:824 - STEP: validating api versions 07/29/23 16:18:49.714 - Jul 29 16:18:49.714: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3034 api-versions' - Jul 29 16:18:49.932: INFO: stderr: "" - Jul 29 16:18:49.932: INFO: stdout: "admissionregistration.k8s.io/v1\napiextensions.k8s.io/v1\napiregistration.k8s.io/v1\napps/v1\nauthentication.k8s.io/v1\nauthorization.k8s.io/v1\nautoscaling/v1\nautoscaling/v2\nbatch/v1\ncertificates.k8s.io/v1\ncilium.io/v2\ncilium.io/v2alpha1\ncoordination.k8s.io/v1\ndiscovery.k8s.io/v1\nevents.k8s.io/v1\nflowcontrol.apiserver.k8s.io/v1beta2\nflowcontrol.apiserver.k8s.io/v1beta3\nnetworking.k8s.io/v1\nnode.k8s.io/v1\npolicy/v1\nrbac.authorization.k8s.io/v1\nscheduling.k8s.io/v1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" - [AfterEach] [sig-cli] Kubectl client + [BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 + [It] should run through the lifecycle of Pods and PodStatus [Conformance] + test/e2e/common/node/pods.go:896 + STEP: creating a Pod with a static label 08/24/23 12:23:59.711 + STEP: watching for Pod to be ready 08/24/23 12:23:59.734 + Aug 24 12:23:59.737: INFO: observed Pod pod-test in namespace pods-4062 in phase Pending with labels: map[test-pod-static:true] & conditions [] + Aug 24 12:23:59.743: INFO: observed Pod pod-test in namespace pods-4062 in phase Pending with labels: map[test-pod-static:true] & conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC }] + Aug 24 12:23:59.765: INFO: observed Pod pod-test in namespace pods-4062 in phase Pending with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC ContainersNotReady containers with 
unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC }] + Aug 24 12:24:01.337: INFO: Found Pod pod-test in namespace pods-4062 in phase Running with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:24:01 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:24:01 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:23:59 +0000 UTC }] + STEP: patching the Pod with a new Label and updated data 08/24/23 12:24:01.342 + STEP: getting the Pod and ensuring that it's patched 08/24/23 12:24:01.367 + STEP: replacing the Pod's status Ready condition to False 08/24/23 12:24:01.375 + STEP: check the Pod again to ensure its Ready conditions are False 08/24/23 12:24:01.396 + STEP: deleting the Pod via a Collection with a LabelSelector 08/24/23 12:24:01.397 + STEP: watching for the Pod to be deleted 08/24/23 12:24:01.414 + Aug 24 12:24:01.418: INFO: observed event type MODIFIED + Aug 24 12:24:03.358: INFO: observed event type MODIFIED + Aug 24 12:24:04.371: INFO: observed event type MODIFIED + Aug 24 12:24:04.384: INFO: observed event type MODIFIED + [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 - Jul 29 16:18:49.934: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 12:24:04.403: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-3034" for this suite. 07/29/23 16:18:49.945 + STEP: Destroying namespace "pods-4062" for this suite. 
08/24/23 12:24:04.413 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSS ------------------------------ -[sig-storage] ConfigMap - should be immutable if `immutable` field is set [Conformance] - test/e2e/common/storage/configmap_volume.go:504 -[BeforeEach] [sig-storage] ConfigMap +[sig-apps] Deployment + deployment should support rollover [Conformance] + test/e2e/apps/deployment.go:132 +[BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:18:49.962 -Jul 29 16:18:49.962: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:18:49.966 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:50.002 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:50.008 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 12:24:04.424 +Aug 24 12:24:04.424: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename deployment 08/24/23 12:24:04.428 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:24:04.459 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:24:04.463 +[BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 -[It] should be immutable if `immutable` field is set [Conformance] - test/e2e/common/storage/configmap_volume.go:504 -[AfterEach] [sig-storage] ConfigMap +[BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 +[It] deployment should support rollover [Conformance] + test/e2e/apps/deployment.go:132 +Aug 24 12:24:04.487: INFO: Pod name rollover-pod: Found 0 pods out of 1 +Aug 24 12:24:09.499: INFO: Pod name rollover-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running 08/24/23 12:24:09.499 +Aug 24 12:24:09.500: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready +Aug 24 12:24:11.508: INFO: Creating deployment "test-rollover-deployment" +Aug 24 12:24:11.536: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations +Aug 24 12:24:13.553: INFO: Check revision of new replica set for deployment "test-rollover-deployment" +Aug 24 12:24:13.565: INFO: Ensure that both replica sets have 1 created replica +Aug 24 12:24:13.576: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update +Aug 24 12:24:13.594: INFO: Updating deployment test-rollover-deployment +Aug 24 12:24:13.594: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller +Aug 24 12:24:15.611: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 +Aug 24 12:24:15.623: INFO: Make sure deployment "test-rollover-deployment" is complete +Aug 24 12:24:15.635: INFO: all replica sets need to contain the pod-template-hash label +Aug 24 12:24:15.635: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", 
LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:24:17.654: INFO: all replica sets need to contain the pod-template-hash label +Aug 24 12:24:17.654: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:24:19.654: INFO: all replica sets need to contain the pod-template-hash label +Aug 24 12:24:19.654: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:24:21.650: INFO: all replica sets need to contain the pod-template-hash label +Aug 24 12:24:21.650: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:24:23.652: INFO: all replica sets need to contain the pod-template-hash label +Aug 24 12:24:23.653: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, 
time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} +Aug 24 12:24:25.655: INFO: +Aug 24 12:24:25.655: INFO: Ensure that both old replica sets have no replicas +[AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 +Aug 24 12:24:25.675: INFO: Deployment "test-rollover-deployment": +&Deployment{ObjectMeta:{test-rollover-deployment deployment-6194 3a253bf1-2f34-467e-804e-d9eda488d067 19583 2 2023-08-24 12:24:11 +0000 UTC map[name:rollover-pod] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:minReadySeconds":{},"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:25 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00429eba8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] 
[]}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-08-24 12:24:11 +0000 UTC,LastTransitionTime:2023-08-24 12:24:11 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rollover-deployment-6c6df9974f" has successfully progressed.,LastUpdateTime:2023-08-24 12:24:25 +0000 UTC,LastTransitionTime:2023-08-24 12:24:11 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Aug 24 12:24:25.685: INFO: New ReplicaSet "test-rollover-deployment-6c6df9974f" of Deployment "test-rollover-deployment": +&ReplicaSet{ObjectMeta:{test-rollover-deployment-6c6df9974f deployment-6194 082d9606-3e45-4f9d-85f1-35af12302a59 19572 2 2023-08-24 12:24:13 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-rollover-deployment 3a253bf1-2f34-467e-804e-d9eda488d067 0xc00708ce17 0xc00708ce18}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"3a253bf1-2f34-467e-804e-d9eda488d067\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:25 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6c6df9974f,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00708cec8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil 
[] map[] [] nil [] []}},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:24:25.685: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": +Aug 24 12:24:25.685: INFO: &ReplicaSet{ObjectMeta:{test-rollover-controller deployment-6194 02ba96ae-0477-4111-8b49-8e766acad8ef 19582 2 2023-08-24 12:24:04 +0000 UTC map[name:rollover-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2] [{apps/v1 Deployment test-rollover-deployment 3a253bf1-2f34-467e-804e-d9eda488d067 0xc00708cce7 0xc00708cce8}] [] [{e2e.test Update apps/v1 2023-08-24 12:24:04 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:25 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"3a253bf1-2f34-467e-804e-d9eda488d067\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:25 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc00708cda8 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:24:25.685: INFO: &ReplicaSet{ObjectMeta:{test-rollover-deployment-768dcbc65b deployment-6194 4ed4bcfe-839a-44e8-8d3a-e7632da07a96 19532 2 2023-08-24 12:24:11 +0000 UTC map[name:rollover-pod pod-template-hash:768dcbc65b] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-rollover-deployment 3a253bf1-2f34-467e-804e-d9eda488d067 0xc00708cf47 0xc00708cf48}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"3a253bf1-2f34-467e-804e-d9eda488d067\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"redis-slave\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 768dcbc65b,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:768dcbc65b] map[] [] [] []} {[] [] [{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00708cff8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:24:25.697: INFO: Pod "test-rollover-deployment-6c6df9974f-xvgz8" is available: +&Pod{ObjectMeta:{test-rollover-deployment-6c6df9974f-xvgz8 test-rollover-deployment-6c6df9974f- deployment-6194 7c44eb38-db0e-4a1c-bdd9-39b740feee3a 19545 0 2023-08-24 12:24:13 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[] [{apps/v1 ReplicaSet test-rollover-deployment-6c6df9974f 082d9606-3e45-4f9d-85f1-35af12302a59 0xc00708d567 0xc00708d568}] [] [{kube-controller-manager Update v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"082d9606-3e45-4f9d-85f1-35af12302a59\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:24:15 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.125\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-2qvw4,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2qvw4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{K
ey:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:24:13 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:24:15 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:24:15 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:24:13 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.125,StartTime:2023-08-24 12:24:13 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:24:14 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,ImageID:registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e,ContainerID:cri-o://b60a484e7ec4b92a1b8df72bd4bfe8273c631898b5e9f0a796b442d913977964,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.125,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 -Jul 29 16:18:50.088: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 12:24:25.698: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-6002" for this suite. 07/29/23 16:18:50.096 +STEP: Destroying namespace "deployment-6194" for this suite. 
08/24/23 12:24:25.709 ------------------------------ -• [0.146 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - should be immutable if `immutable` field is set [Conformance] - test/e2e/common/storage/configmap_volume.go:504 +• [SLOW TEST] [21.307 seconds] +[sig-apps] Deployment +test/e2e/apps/framework.go:23 + deployment should support rollover [Conformance] + test/e2e/apps/deployment.go:132 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:18:49.962 - Jul 29 16:18:49.962: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:18:49.966 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:50.002 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:50.008 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 12:24:04.424 + Aug 24 12:24:04.424: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename deployment 08/24/23 12:24:04.428 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:24:04.459 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:24:04.463 + [BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 - [It] should be immutable if `immutable` field is set [Conformance] - test/e2e/common/storage/configmap_volume.go:504 - [AfterEach] [sig-storage] ConfigMap + [BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 + [It] deployment should support rollover [Conformance] + test/e2e/apps/deployment.go:132 + Aug 24 12:24:04.487: INFO: Pod name rollover-pod: Found 0 pods out of 1 + Aug 24 12:24:09.499: INFO: Pod name rollover-pod: Found 1 pods out of 1 + STEP: ensuring each pod is running 08/24/23 12:24:09.499 + Aug 24 12:24:09.500: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready + Aug 24 12:24:11.508: INFO: Creating deployment "test-rollover-deployment" + Aug 24 12:24:11.536: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations + Aug 24 12:24:13.553: INFO: Check revision of new replica set for deployment "test-rollover-deployment" + Aug 24 12:24:13.565: INFO: Ensure that both replica sets have 1 created replica + Aug 24 12:24:13.576: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update + Aug 24 12:24:13.594: INFO: Updating deployment test-rollover-deployment + Aug 24 12:24:13.594: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller + Aug 24 12:24:15.611: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 + Aug 24 12:24:15.623: INFO: Make sure deployment "test-rollover-deployment" is complete + Aug 24 12:24:15.635: INFO: all replica sets need to contain the pod-template-hash label + Aug 24 12:24:15.635: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", 
Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:24:17.654: INFO: all replica sets need to contain the pod-template-hash label + Aug 24 12:24:17.654: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:24:19.654: INFO: all replica sets need to contain the pod-template-hash label + Aug 24 12:24:19.654: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:24:21.650: INFO: all replica sets need to contain the pod-template-hash label + Aug 24 12:24:21.650: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:24:23.652: INFO: all replica sets need to contain the pod-template-hash label + Aug 24 12:24:23.653: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 24, 15, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 24, 11, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} + Aug 24 12:24:25.655: INFO: + Aug 24 12:24:25.655: INFO: Ensure that both old replica sets have no replicas + [AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 + Aug 24 12:24:25.675: INFO: Deployment "test-rollover-deployment": + &Deployment{ObjectMeta:{test-rollover-deployment deployment-6194 3a253bf1-2f34-467e-804e-d9eda488d067 19583 2 2023-08-24 12:24:11 +0000 UTC map[name:rollover-pod] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:minReadySeconds":{},"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:25 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00429eba8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] 
[]}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-08-24 12:24:11 +0000 UTC,LastTransitionTime:2023-08-24 12:24:11 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rollover-deployment-6c6df9974f" has successfully progressed.,LastUpdateTime:2023-08-24 12:24:25 +0000 UTC,LastTransitionTime:2023-08-24 12:24:11 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + + Aug 24 12:24:25.685: INFO: New ReplicaSet "test-rollover-deployment-6c6df9974f" of Deployment "test-rollover-deployment": + &ReplicaSet{ObjectMeta:{test-rollover-deployment-6c6df9974f deployment-6194 082d9606-3e45-4f9d-85f1-35af12302a59 19572 2 2023-08-24 12:24:13 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-rollover-deployment 3a253bf1-2f34-467e-804e-d9eda488d067 0xc00708ce17 0xc00708ce18}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"3a253bf1-2f34-467e-804e-d9eda488d067\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:25 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6c6df9974f,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00708cec8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] 
nil [] map[] [] nil [] []}},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:24:25.685: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": + Aug 24 12:24:25.685: INFO: &ReplicaSet{ObjectMeta:{test-rollover-controller deployment-6194 02ba96ae-0477-4111-8b49-8e766acad8ef 19582 2 2023-08-24 12:24:04 +0000 UTC map[name:rollover-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2] [{apps/v1 Deployment test-rollover-deployment 3a253bf1-2f34-467e-804e-d9eda488d067 0xc00708cce7 0xc00708cce8}] [] [{e2e.test Update apps/v1 2023-08-24 12:24:04 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:25 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"3a253bf1-2f34-467e-804e-d9eda488d067\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:25 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc00708cda8 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:24:25.685: INFO: &ReplicaSet{ObjectMeta:{test-rollover-deployment-768dcbc65b deployment-6194 4ed4bcfe-839a-44e8-8d3a-e7632da07a96 19532 2 2023-08-24 12:24:11 +0000 UTC map[name:rollover-pod pod-template-hash:768dcbc65b] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-rollover-deployment 3a253bf1-2f34-467e-804e-d9eda488d067 0xc00708cf47 0xc00708cf48}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"3a253bf1-2f34-467e-804e-d9eda488d067\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"redis-slave\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 768dcbc65b,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:768dcbc65b] map[] [] [] []} {[] [] [{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00708cff8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:24:25.697: INFO: Pod "test-rollover-deployment-6c6df9974f-xvgz8" is available: + &Pod{ObjectMeta:{test-rollover-deployment-6c6df9974f-xvgz8 test-rollover-deployment-6c6df9974f- deployment-6194 7c44eb38-db0e-4a1c-bdd9-39b740feee3a 19545 0 2023-08-24 12:24:13 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[] [{apps/v1 ReplicaSet test-rollover-deployment-6c6df9974f 082d9606-3e45-4f9d-85f1-35af12302a59 0xc00708d567 0xc00708d568}] [] [{kube-controller-manager Update v1 2023-08-24 12:24:13 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"082d9606-3e45-4f9d-85f1-35af12302a59\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:24:15 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.125\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-2qvw4,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2qvw4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{K
ey:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:24:13 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:24:15 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:24:15 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:24:13 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.125,StartTime:2023-08-24 12:24:13 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:24:14 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,ImageID:registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e,ContainerID:cri-o://b60a484e7ec4b92a1b8df72bd4bfe8273c631898b5e9f0a796b442d913977964,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.125,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + [AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 - Jul 29 16:18:50.088: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 12:24:25.698: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-6002" for this suite. 07/29/23 16:18:50.096 + STEP: Destroying namespace "deployment-6194" for this suite. 
08/24/23 12:24:25.709 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] ReplicaSet - Replace and Patch tests [Conformance] - test/e2e/apps/replica_set.go:154 -[BeforeEach] [sig-apps] ReplicaSet +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] + test/e2e/apps/statefulset.go:587 +[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:18:50.112 -Jul 29 16:18:50.112: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replicaset 07/29/23 16:18:50.114 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:50.148 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:50.153 -[BeforeEach] [sig-apps] ReplicaSet +STEP: Creating a kubernetes client 08/24/23 12:24:25.736 +Aug 24 12:24:25.737: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename statefulset 08/24/23 12:24:25.744 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:24:25.778 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:24:25.783 +[BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 -[It] Replace and Patch tests [Conformance] - test/e2e/apps/replica_set.go:154 -Jul 29 16:18:50.186: INFO: Pod name sample-pod: Found 0 pods out of 1 -Jul 29 16:18:55.196: INFO: Pod name sample-pod: Found 1 pods out of 1 -STEP: ensuring each pod is running 07/29/23 16:18:55.196 -STEP: Scaling up "test-rs" replicaset 07/29/23 16:18:55.196 -Jul 29 16:18:55.217: INFO: Updating replica set "test-rs" -STEP: patching the ReplicaSet 07/29/23 16:18:55.217 -W0729 16:18:55.236575 13 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" -Jul 29 16:18:55.241: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 1, AvailableReplicas 1 -Jul 29 16:18:55.300: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 1, AvailableReplicas 1 -Jul 29 16:18:55.355: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 1, AvailableReplicas 1 -Jul 29 16:18:55.423: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 1, AvailableReplicas 1 -Jul 29 16:18:57.287: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 2, AvailableReplicas 2 -Jul 29 16:18:57.653: INFO: observed Replicaset test-rs in namespace replicaset-2166 with ReadyReplicas 3 found true -[AfterEach] [sig-apps] ReplicaSet +[BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 +STEP: Creating service test in namespace statefulset-1231 08/24/23 12:24:25.789 +[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] + test/e2e/apps/statefulset.go:587 +STEP: Initializing watcher for selector baz=blah,foo=bar 08/24/23 12:24:25.812 +STEP: Creating stateful set ss in namespace statefulset-1231 08/24/23 12:24:25.816 +STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-1231 08/24/23 12:24:25.829 +Aug 24 12:24:25.839: INFO: 
Found 0 stateful pods, waiting for 1 +Aug 24 12:24:35.849: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod 08/24/23 12:24:35.849 +Aug 24 12:24:35.857: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 12:24:36.123: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 12:24:36.123: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 12:24:36.123: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Aug 24 12:24:36.129: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +Aug 24 12:24:46.141: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Aug 24 12:24:46.141: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 12:24:46.187: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999233s +Aug 24 12:24:47.194: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.992517991s +Aug 24 12:24:48.203: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.985141762s +Aug 24 12:24:49.212: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.976339295s +Aug 24 12:24:50.219: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.967712079s +Aug 24 12:24:51.228: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.960407188s +Aug 24 12:24:52.235: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.951556468s +Aug 24 12:24:53.244: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.943120202s +Aug 24 12:24:54.252: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.934961545s +Aug 24 12:24:55.261: INFO: Verifying statefulset ss doesn't scale past 1 for another 926.827487ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-1231 08/24/23 12:24:56.261 +Aug 24 12:24:56.270: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Aug 24 12:24:56.528: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Aug 24 12:24:56.528: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Aug 24 12:24:56.528: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Aug 24 12:24:56.538: INFO: Found 1 stateful pods, waiting for 3 +Aug 24 12:25:06.550: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 12:25:06.550: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 12:25:06.550: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Verifying that stateful set ss was scaled up in order 08/24/23 12:25:06.55 +STEP: Scale down will halt with unhealthy stateful pod 08/24/23 12:25:06.55 +Aug 24 12:25:06.565: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-0 -- /bin/sh -x -c mv 
-v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 12:25:06.848: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 12:25:06.848: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 12:25:06.848: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Aug 24 12:25:06.850: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 12:25:07.182: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 12:25:07.182: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 12:25:07.182: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Aug 24 12:25:07.182: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 12:25:07.497: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 12:25:07.497: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 12:25:07.497: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Aug 24 12:25:07.497: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 12:25:07.502: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 +Aug 24 12:25:17.519: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Aug 24 12:25:17.520: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +Aug 24 12:25:17.520: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +Aug 24 12:25:17.547: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999758s +Aug 24 12:25:18.554: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.991242738s +Aug 24 12:25:19.564: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.982446647s +Aug 24 12:25:20.574: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.973604151s +Aug 24 12:25:21.585: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.963957558s +Aug 24 12:25:22.594: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.952667003s +Aug 24 12:25:23.604: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.943546508s +Aug 24 12:25:24.616: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.934191488s +Aug 24 12:25:25.626: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.921650837s +Aug 24 12:25:26.639: INFO: Verifying statefulset ss doesn't scale past 3 for another 911.901186ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-1231 08/24/23 12:25:27.64 +Aug 24 12:25:27.653: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Aug 24 12:25:27.994: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Aug 24 12:25:27.994: INFO: stdout: 
"'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Aug 24 12:25:27.994: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Aug 24 12:25:27.994: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Aug 24 12:25:28.335: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Aug 24 12:25:28.335: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Aug 24 12:25:28.335: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Aug 24 12:25:28.335: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Aug 24 12:25:28.637: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Aug 24 12:25:28.637: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Aug 24 12:25:28.637: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Aug 24 12:25:28.637: INFO: Scaling statefulset ss to 0 +STEP: Verifying that stateful set ss was scaled down in reverse order 08/24/23 12:25:38.677 +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 +Aug 24 12:25:38.678: INFO: Deleting all statefulset in ns statefulset-1231 +Aug 24 12:25:38.686: INFO: Scaling statefulset ss to 0 +Aug 24 12:25:38.711: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 12:25:38.717: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 -Jul 29 16:18:57.654: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] ReplicaSet +Aug 24 12:25:38.750: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] ReplicaSet +[DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] ReplicaSet +[DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 -STEP: Destroying namespace "replicaset-2166" for this suite. 07/29/23 16:18:57.664 +STEP: Destroying namespace "statefulset-1231" for this suite. 
08/24/23 12:25:38.762 ------------------------------ -• [SLOW TEST] [7.572 seconds] -[sig-apps] ReplicaSet +• [SLOW TEST] [73.043 seconds] +[sig-apps] StatefulSet test/e2e/apps/framework.go:23 - Replace and Patch tests [Conformance] - test/e2e/apps/replica_set.go:154 + Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:103 + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] + test/e2e/apps/statefulset.go:587 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] ReplicaSet + [BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:18:50.112 - Jul 29 16:18:50.112: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replicaset 07/29/23 16:18:50.114 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:50.148 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:50.153 - [BeforeEach] [sig-apps] ReplicaSet + STEP: Creating a kubernetes client 08/24/23 12:24:25.736 + Aug 24 12:24:25.737: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename statefulset 08/24/23 12:24:25.744 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:24:25.778 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:24:25.783 + [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 - [It] Replace and Patch tests [Conformance] - test/e2e/apps/replica_set.go:154 - Jul 29 16:18:50.186: INFO: Pod name sample-pod: Found 0 pods out of 1 - Jul 29 16:18:55.196: INFO: Pod name sample-pod: Found 1 pods out of 1 - STEP: ensuring each pod is running 07/29/23 16:18:55.196 - STEP: Scaling up "test-rs" replicaset 07/29/23 16:18:55.196 - Jul 29 16:18:55.217: INFO: Updating replica set "test-rs" - STEP: patching the ReplicaSet 07/29/23 16:18:55.217 - W0729 16:18:55.236575 13 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" - Jul 29 16:18:55.241: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 1, AvailableReplicas 1 - Jul 29 16:18:55.300: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 1, AvailableReplicas 1 - Jul 29 16:18:55.355: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 1, AvailableReplicas 1 - Jul 29 16:18:55.423: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 1, AvailableReplicas 1 - Jul 29 16:18:57.287: INFO: observed ReplicaSet test-rs in namespace replicaset-2166 with ReadyReplicas 2, AvailableReplicas 2 - Jul 29 16:18:57.653: INFO: observed Replicaset test-rs in namespace replicaset-2166 with ReadyReplicas 3 found true - [AfterEach] [sig-apps] ReplicaSet + [BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 + [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 + STEP: Creating service test in namespace statefulset-1231 08/24/23 12:24:25.789 + [It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] + test/e2e/apps/statefulset.go:587 + STEP: Initializing watcher for selector baz=blah,foo=bar 08/24/23 12:24:25.812 + STEP: Creating stateful set ss in namespace statefulset-1231 08/24/23 12:24:25.816 + STEP: Waiting until all stateful set ss replicas 
will be running in namespace statefulset-1231 08/24/23 12:24:25.829 + Aug 24 12:24:25.839: INFO: Found 0 stateful pods, waiting for 1 + Aug 24 12:24:35.849: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true + STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod 08/24/23 12:24:35.849 + Aug 24 12:24:35.857: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 12:24:36.123: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 12:24:36.123: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 12:24:36.123: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + Aug 24 12:24:36.129: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true + Aug 24 12:24:46.141: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false + Aug 24 12:24:46.141: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 12:24:46.187: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999233s + Aug 24 12:24:47.194: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.992517991s + Aug 24 12:24:48.203: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.985141762s + Aug 24 12:24:49.212: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.976339295s + Aug 24 12:24:50.219: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.967712079s + Aug 24 12:24:51.228: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.960407188s + Aug 24 12:24:52.235: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.951556468s + Aug 24 12:24:53.244: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.943120202s + Aug 24 12:24:54.252: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.934961545s + Aug 24 12:24:55.261: INFO: Verifying statefulset ss doesn't scale past 1 for another 926.827487ms + STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-1231 08/24/23 12:24:56.261 + Aug 24 12:24:56.270: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' + Aug 24 12:24:56.528: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" + Aug 24 12:24:56.528: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" + Aug 24 12:24:56.528: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + + Aug 24 12:24:56.538: INFO: Found 1 stateful pods, waiting for 3 + Aug 24 12:25:06.550: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 12:25:06.550: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 12:25:06.550: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true + STEP: Verifying that stateful set ss was scaled up in order 08/24/23 12:25:06.55 + STEP: Scale down will halt with unhealthy stateful pod 08/24/23 12:25:06.55 + Aug 24 12:25:06.565: INFO: 
Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 12:25:06.848: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 12:25:06.848: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 12:25:06.848: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + Aug 24 12:25:06.850: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 12:25:07.182: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 12:25:07.182: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 12:25:07.182: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + Aug 24 12:25:07.182: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 12:25:07.497: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 12:25:07.497: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 12:25:07.497: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + Aug 24 12:25:07.497: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 12:25:07.502: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 + Aug 24 12:25:17.519: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false + Aug 24 12:25:17.520: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false + Aug 24 12:25:17.520: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false + Aug 24 12:25:17.547: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999758s + Aug 24 12:25:18.554: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.991242738s + Aug 24 12:25:19.564: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.982446647s + Aug 24 12:25:20.574: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.973604151s + Aug 24 12:25:21.585: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.963957558s + Aug 24 12:25:22.594: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.952667003s + Aug 24 12:25:23.604: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.943546508s + Aug 24 12:25:24.616: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.934191488s + Aug 24 12:25:25.626: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.921650837s + Aug 24 12:25:26.639: INFO: Verifying statefulset ss doesn't scale past 3 for another 911.901186ms + STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-1231 08/24/23 12:25:27.64 + Aug 24 12:25:27.653: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html 
/usr/local/apache2/htdocs/ || true' + Aug 24 12:25:27.994: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" + Aug 24 12:25:27.994: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" + Aug 24 12:25:27.994: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + + Aug 24 12:25:27.994: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' + Aug 24 12:25:28.335: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" + Aug 24 12:25:28.335: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" + Aug 24 12:25:28.335: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + + Aug 24 12:25:28.335: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-1231 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' + Aug 24 12:25:28.637: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" + Aug 24 12:25:28.637: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" + Aug 24 12:25:28.637: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + + Aug 24 12:25:28.637: INFO: Scaling statefulset ss to 0 + STEP: Verifying that stateful set ss was scaled down in reverse order 08/24/23 12:25:38.677 + [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 + Aug 24 12:25:38.678: INFO: Deleting all statefulset in ns statefulset-1231 + Aug 24 12:25:38.686: INFO: Scaling statefulset ss to 0 + Aug 24 12:25:38.711: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 12:25:38.717: INFO: Deleting statefulset ss + [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 - Jul 29 16:18:57.654: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] ReplicaSet + Aug 24 12:25:38.750: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] ReplicaSet + [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] ReplicaSet + [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 - STEP: Destroying namespace "replicaset-2166" for this suite. 07/29/23 16:18:57.664 + STEP: Destroying namespace "statefulset-1231" for this suite. 
08/24/23 12:25:38.762 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSS ------------------------------ -[sig-node] Containers - should use the image defaults if command and args are blank [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:39 -[BeforeEach] [sig-node] Containers +[sig-apps] Daemon set [Serial] + should list and delete a collection of DaemonSets [Conformance] + test/e2e/apps/daemon_set.go:834 +[BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:18:57.695 -Jul 29 16:18:57.695: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename containers 07/29/23 16:18:57.698 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:57.735 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:57.742 -[BeforeEach] [sig-node] Containers +STEP: Creating a kubernetes client 08/24/23 12:25:38.781 +Aug 24 12:25:38.783: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename daemonsets 08/24/23 12:25:38.787 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:25:38.82 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:25:38.824 +[BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:39 -Jul 29 16:18:57.764: INFO: Waiting up to 5m0s for pod "client-containers-741c67fa-ae09-478b-90a9-bedbc722c7ac" in namespace "containers-5535" to be "running" -Jul 29 16:18:57.771: INFO: Pod "client-containers-741c67fa-ae09-478b-90a9-bedbc722c7ac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.379413ms -Jul 29 16:18:59.788: INFO: Pod "client-containers-741c67fa-ae09-478b-90a9-bedbc722c7ac": Phase="Running", Reason="", readiness=true. Elapsed: 2.02327789s -Jul 29 16:18:59.788: INFO: Pod "client-containers-741c67fa-ae09-478b-90a9-bedbc722c7ac" satisfied condition "running" -[AfterEach] [sig-node] Containers +[BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 +[It] should list and delete a collection of DaemonSets [Conformance] + test/e2e/apps/daemon_set.go:834 +STEP: Creating simple DaemonSet "daemon-set" 08/24/23 12:25:38.868 +STEP: Check that daemon pods launch on every node of the cluster. 
08/24/23 12:25:38.88 +Aug 24 12:25:38.894: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:25:38.894: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:25:39.922: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:25:39.923: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:25:40.911: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 +Aug 24 12:25:40.911: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:25:41.917: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 +Aug 24 12:25:41.918: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:25:42.913: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 +Aug 24 12:25:42.914: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set +STEP: listing all DeamonSets 08/24/23 12:25:42.92 +STEP: DeleteCollection of the DaemonSets 08/24/23 12:25:42.931 +STEP: Verify that ReplicaSets have been deleted 08/24/23 12:25:42.95 +[AfterEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:122 +Aug 24 12:25:42.982: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"20038"},"items":null} + +Aug 24 12:25:42.992: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"20038"},"items":[{"metadata":{"name":"daemon-set-g8l88","generateName":"daemon-set-","namespace":"daemonsets-7867","uid":"5ed3c1a6-1562-4070-a8f5-1f914e667434","resourceVersion":"20024","creationTimestamp":"2023-08-24T12:25:38Z","labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"49126391-02e5-4f9c-8e39-702d32697287","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:38Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"49126391-02e5-4f9c-8e39-702d32697287\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:40Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"i
p\":\"10.233.66.195\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-778vn","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-778vn","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"pe9deep4seen-3","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["pe9deep4seen-3"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:40Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:40Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"}],"hostIP":"192.168.121.130","podIP":"10.233.66.195","podIPs":[{"ip":"10.233.66.195"}],"startTime":"2023-08-24T12:25:38Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-08-24T12:25:40Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://0c4a0c4c1f127a0d7634fdb4b6ef2f1186bc747948c1ff5a3d16dfc7b2632467","started":true}],"qosClass":"BestEffort"}},{"metadata":{"name":"daemon-set-q7v59","generateName":"daemon-set-","namespace":"daemonsets-7867","uid":"2c1aa335-92f1-4b11-8331-18243a71c598","resourceVersion":"20034","creationTimestamp":"2023-08-24T12:25:38Z","labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"49126391-02e5-4f9c-8e39-702d32697287","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:38Z","fieldsType":"FieldsV1","f
ieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"49126391-02e5-4f9c-8e39-702d32697287\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:42Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.16\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-dhqf6","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-dhqf6","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"pe9deep4seen-1","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["pe9deep4seen-1"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:42Z"},{"type":"Contai
nersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:42Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"}],"hostIP":"192.168.121.127","podIP":"10.233.64.16","podIPs":[{"ip":"10.233.64.16"}],"startTime":"2023-08-24T12:25:38Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-08-24T12:25:41Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://c40cc37da81419ddfd4e9b2eed53a688e3611213015129c1051e7eed414a1d69","started":true}],"qosClass":"BestEffort"}},{"metadata":{"name":"daemon-set-x86rv","generateName":"daemon-set-","namespace":"daemonsets-7867","uid":"055e48e7-5329-45fd-b77d-03f2f0ab2b9c","resourceVersion":"20026","creationTimestamp":"2023-08-24T12:25:38Z","labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"49126391-02e5-4f9c-8e39-702d32697287","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:38Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"49126391-02e5-4f9c-8e39-702d32697287\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:40Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.43\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-472zc","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-472zc","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"termin
ationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"pe9deep4seen-2","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["pe9deep4seen-2"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:39Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:40Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:40Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"}],"hostIP":"192.168.121.111","podIP":"10.233.65.43","podIPs":[{"ip":"10.233.65.43"}],"startTime":"2023-08-24T12:25:39Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-08-24T12:25:40Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://ad912fa2ca3caa2e15573019d661489674f7690fbc5c880bdd3668079a0986f5","started":true}],"qosClass":"BestEffort"}}]} + +[AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:18:59.803: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Containers +Aug 24 12:25:43.107: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Containers +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Containers +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "containers-5535" for this suite. 07/29/23 16:18:59.817 +STEP: Destroying namespace "daemonsets-7867" for this suite. 
08/24/23 12:25:43.118 ------------------------------ -• [2.136 seconds] -[sig-node] Containers -test/e2e/common/node/framework.go:23 - should use the image defaults if command and args are blank [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:39 +• [4.355 seconds] +[sig-apps] Daemon set [Serial] +test/e2e/apps/framework.go:23 + should list and delete a collection of DaemonSets [Conformance] + test/e2e/apps/daemon_set.go:834 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Containers + [BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:18:57.695 - Jul 29 16:18:57.695: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename containers 07/29/23 16:18:57.698 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:57.735 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:57.742 - [BeforeEach] [sig-node] Containers + STEP: Creating a kubernetes client 08/24/23 12:25:38.781 + Aug 24 12:25:38.783: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename daemonsets 08/24/23 12:25:38.787 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:25:38.82 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:25:38.824 + [BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] should use the image defaults if command and args are blank [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:39 - Jul 29 16:18:57.764: INFO: Waiting up to 5m0s for pod "client-containers-741c67fa-ae09-478b-90a9-bedbc722c7ac" in namespace "containers-5535" to be "running" - Jul 29 16:18:57.771: INFO: Pod "client-containers-741c67fa-ae09-478b-90a9-bedbc722c7ac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.379413ms - Jul 29 16:18:59.788: INFO: Pod "client-containers-741c67fa-ae09-478b-90a9-bedbc722c7ac": Phase="Running", Reason="", readiness=true. Elapsed: 2.02327789s - Jul 29 16:18:59.788: INFO: Pod "client-containers-741c67fa-ae09-478b-90a9-bedbc722c7ac" satisfied condition "running" - [AfterEach] [sig-node] Containers + [BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 + [It] should list and delete a collection of DaemonSets [Conformance] + test/e2e/apps/daemon_set.go:834 + STEP: Creating simple DaemonSet "daemon-set" 08/24/23 12:25:38.868 + STEP: Check that daemon pods launch on every node of the cluster. 
08/24/23 12:25:38.88 + Aug 24 12:25:38.894: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:25:38.894: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:25:39.922: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:25:39.923: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:25:40.911: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 + Aug 24 12:25:40.911: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:25:41.917: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 + Aug 24 12:25:41.918: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:25:42.913: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 + Aug 24 12:25:42.914: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set + STEP: listing all DeamonSets 08/24/23 12:25:42.92 + STEP: DeleteCollection of the DaemonSets 08/24/23 12:25:42.931 + STEP: Verify that ReplicaSets have been deleted 08/24/23 12:25:42.95 + [AfterEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:122 + Aug 24 12:25:42.982: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"20038"},"items":null} + + Aug 24 12:25:42.992: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"20038"},"items":[{"metadata":{"name":"daemon-set-g8l88","generateName":"daemon-set-","namespace":"daemonsets-7867","uid":"5ed3c1a6-1562-4070-a8f5-1f914e667434","resourceVersion":"20024","creationTimestamp":"2023-08-24T12:25:38Z","labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"49126391-02e5-4f9c-8e39-702d32697287","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:38Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"49126391-02e5-4f9c-8e39-702d32697287\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:40Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs"
:{".":{},"k:{\"ip\":\"10.233.66.195\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-778vn","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-778vn","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"pe9deep4seen-3","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["pe9deep4seen-3"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:40Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:40Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"}],"hostIP":"192.168.121.130","podIP":"10.233.66.195","podIPs":[{"ip":"10.233.66.195"}],"startTime":"2023-08-24T12:25:38Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-08-24T12:25:40Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://0c4a0c4c1f127a0d7634fdb4b6ef2f1186bc747948c1ff5a3d16dfc7b2632467","started":true}],"qosClass":"BestEffort"}},{"metadata":{"name":"daemon-set-q7v59","generateName":"daemon-set-","namespace":"daemonsets-7867","uid":"2c1aa335-92f1-4b11-8331-18243a71c598","resourceVersion":"20034","creationTimestamp":"2023-08-24T12:25:38Z","labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"49126391-02e5-4f9c-8e39-702d32697287","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:38Z","fieldsTyp
e":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"49126391-02e5-4f9c-8e39-702d32697287\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:42Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.16\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-dhqf6","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-dhqf6","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"pe9deep4seen-1","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["pe9deep4seen-1"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:42Z"}
,{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:42Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"}],"hostIP":"192.168.121.127","podIP":"10.233.64.16","podIPs":[{"ip":"10.233.64.16"}],"startTime":"2023-08-24T12:25:38Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-08-24T12:25:41Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://c40cc37da81419ddfd4e9b2eed53a688e3611213015129c1051e7eed414a1d69","started":true}],"qosClass":"BestEffort"}},{"metadata":{"name":"daemon-set-x86rv","generateName":"daemon-set-","namespace":"daemonsets-7867","uid":"055e48e7-5329-45fd-b77d-03f2f0ab2b9c","resourceVersion":"20026","creationTimestamp":"2023-08-24T12:25:38Z","labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"49126391-02e5-4f9c-8e39-702d32697287","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:38Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"49126391-02e5-4f9c-8e39-702d32697287\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-08-24T12:25:40Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.43\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-472zc","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-472zc","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceac
count"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"pe9deep4seen-2","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["pe9deep4seen-2"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:39Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:40Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:40Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-08-24T12:25:38Z"}],"hostIP":"192.168.121.111","podIP":"10.233.65.43","podIPs":[{"ip":"10.233.65.43"}],"startTime":"2023-08-24T12:25:39Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-08-24T12:25:40Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://ad912fa2ca3caa2e15573019d661489674f7690fbc5c880bdd3668079a0986f5","started":true}],"qosClass":"BestEffort"}}]} + + [AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 16:18:59.803: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Containers + Aug 24 12:25:43.107: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Containers + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Containers + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "containers-5535" for this suite. 07/29/23 16:18:59.817 + STEP: Destroying namespace "daemonsets-7867" for this suite. 
08/24/23 12:25:43.118 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSS +SSSS ------------------------------ -[sig-network] IngressClass API - should support creating IngressClass API operations [Conformance] - test/e2e/network/ingressclass.go:223 -[BeforeEach] [sig-network] IngressClass API +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should have a working scale subresource [Conformance] + test/e2e/apps/statefulset.go:848 +[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:18:59.835 -Jul 29 16:18:59.835: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename ingressclass 07/29/23 16:18:59.839 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:59.872 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:59.876 -[BeforeEach] [sig-network] IngressClass API +STEP: Creating a kubernetes client 08/24/23 12:25:43.139 +Aug 24 12:25:43.140: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename statefulset 08/24/23 12:25:43.143 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:25:43.185 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:25:43.195 +[BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] IngressClass API - test/e2e/network/ingressclass.go:211 -[It] should support creating IngressClass API operations [Conformance] - test/e2e/network/ingressclass.go:223 -STEP: getting /apis 07/29/23 16:18:59.886 -STEP: getting /apis/networking.k8s.io 07/29/23 16:18:59.891 -STEP: getting /apis/networking.k8s.iov1 07/29/23 16:18:59.893 -STEP: creating 07/29/23 16:18:59.895 -STEP: getting 07/29/23 16:18:59.953 -STEP: listing 07/29/23 16:18:59.959 -STEP: watching 07/29/23 16:18:59.964 -Jul 29 16:18:59.964: INFO: starting watch -STEP: patching 07/29/23 16:18:59.966 -STEP: updating 07/29/23 16:18:59.977 -Jul 29 16:18:59.987: INFO: waiting for watch events with expected annotations -Jul 29 16:18:59.987: INFO: saw patched and updated annotations -STEP: deleting 07/29/23 16:18:59.988 -STEP: deleting a collection 07/29/23 16:19:00.009 -[AfterEach] [sig-network] IngressClass API +[BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 +STEP: Creating service test in namespace statefulset-1365 08/24/23 12:25:43.204 +[It] should have a working scale subresource [Conformance] + test/e2e/apps/statefulset.go:848 +STEP: Creating statefulset ss in namespace statefulset-1365 08/24/23 12:25:43.223 +Aug 24 12:25:43.256: INFO: Found 0 stateful pods, waiting for 1 +Aug 24 12:25:53.264: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: getting scale subresource 08/24/23 12:25:53.278 +STEP: updating a scale subresource 08/24/23 12:25:53.284 +STEP: verifying the statefulset Spec.Replicas was modified 08/24/23 12:25:53.295 +STEP: Patch a scale subresource 08/24/23 12:25:53.304 +STEP: verifying the statefulset Spec.Replicas was modified 08/24/23 12:25:53.32 +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 +Aug 24 12:25:53.327: INFO: Deleting all statefulset in ns statefulset-1365 +Aug 24 12:25:53.343: INFO: Scaling statefulset 
ss to 0 +Aug 24 12:26:03.442: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 12:26:03.450: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 -Jul 29 16:19:00.036: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] IngressClass API +Aug 24 12:26:03.479: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] IngressClass API +[DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] IngressClass API +[DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 -STEP: Destroying namespace "ingressclass-846" for this suite. 07/29/23 16:19:00.045 +STEP: Destroying namespace "statefulset-1365" for this suite. 08/24/23 12:26:03.49 ------------------------------ -• [0.225 seconds] -[sig-network] IngressClass API -test/e2e/network/common/framework.go:23 - should support creating IngressClass API operations [Conformance] - test/e2e/network/ingressclass.go:223 +• [SLOW TEST] [20.363 seconds] +[sig-apps] StatefulSet +test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:103 + should have a working scale subresource [Conformance] + test/e2e/apps/statefulset.go:848 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] IngressClass API + [BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:18:59.835 - Jul 29 16:18:59.835: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename ingressclass 07/29/23 16:18:59.839 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:18:59.872 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:18:59.876 - [BeforeEach] [sig-network] IngressClass API + STEP: Creating a kubernetes client 08/24/23 12:25:43.139 + Aug 24 12:25:43.140: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename statefulset 08/24/23 12:25:43.143 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:25:43.185 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:25:43.195 + [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] IngressClass API - test/e2e/network/ingressclass.go:211 - [It] should support creating IngressClass API operations [Conformance] - test/e2e/network/ingressclass.go:223 - STEP: getting /apis 07/29/23 16:18:59.886 - STEP: getting /apis/networking.k8s.io 07/29/23 16:18:59.891 - STEP: getting /apis/networking.k8s.iov1 07/29/23 16:18:59.893 - STEP: creating 07/29/23 16:18:59.895 - STEP: getting 07/29/23 16:18:59.953 - STEP: listing 07/29/23 16:18:59.959 - STEP: watching 07/29/23 16:18:59.964 - Jul 29 16:18:59.964: INFO: starting watch - STEP: patching 07/29/23 16:18:59.966 - STEP: updating 07/29/23 16:18:59.977 - Jul 29 16:18:59.987: INFO: waiting for watch events with expected annotations - Jul 29 16:18:59.987: INFO: saw patched and updated annotations - STEP: deleting 07/29/23 16:18:59.988 - STEP: deleting a collection 07/29/23 16:19:00.009 - [AfterEach] [sig-network] IngressClass API + [BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 + 
[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 + STEP: Creating service test in namespace statefulset-1365 08/24/23 12:25:43.204 + [It] should have a working scale subresource [Conformance] + test/e2e/apps/statefulset.go:848 + STEP: Creating statefulset ss in namespace statefulset-1365 08/24/23 12:25:43.223 + Aug 24 12:25:43.256: INFO: Found 0 stateful pods, waiting for 1 + Aug 24 12:25:53.264: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true + STEP: getting scale subresource 08/24/23 12:25:53.278 + STEP: updating a scale subresource 08/24/23 12:25:53.284 + STEP: verifying the statefulset Spec.Replicas was modified 08/24/23 12:25:53.295 + STEP: Patch a scale subresource 08/24/23 12:25:53.304 + STEP: verifying the statefulset Spec.Replicas was modified 08/24/23 12:25:53.32 + [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 + Aug 24 12:25:53.327: INFO: Deleting all statefulset in ns statefulset-1365 + Aug 24 12:25:53.343: INFO: Scaling statefulset ss to 0 + Aug 24 12:26:03.442: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 12:26:03.450: INFO: Deleting statefulset ss + [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 - Jul 29 16:19:00.036: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] IngressClass API + Aug 24 12:26:03.479: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] IngressClass API + [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] IngressClass API + [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 - STEP: Destroying namespace "ingressclass-846" for this suite. 07/29/23 16:19:00.045 + STEP: Destroying namespace "statefulset-1365" for this suite. 
08/24/23 12:26:03.49 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Kubelet when scheduling a busybox command that always fails in a pod - should have an terminated reason [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:110 -[BeforeEach] [sig-node] Kubelet +[sig-api-machinery] Garbage collector + should not be blocked by dependency circle [Conformance] + test/e2e/apimachinery/garbage_collector.go:849 +[BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:19:00.066 -Jul 29 16:19:00.066: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubelet-test 07/29/23 16:19:00.067 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:00.095 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:00.101 -[BeforeEach] [sig-node] Kubelet +STEP: Creating a kubernetes client 08/24/23 12:26:03.512 +Aug 24 12:26:03.512: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename gc 08/24/23 12:26:03.514 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:03.549 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:03.553 +[BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 -[BeforeEach] when scheduling a busybox command that always fails in a pod - test/e2e/common/node/kubelet.go:85 -[It] should have an terminated reason [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:110 -[AfterEach] [sig-node] Kubelet +[It] should not be blocked by dependency circle [Conformance] + test/e2e/apimachinery/garbage_collector.go:849 +Aug 24 12:26:03.655: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"e6987ead-b79a-4cb2-9887-aa11a57d82b6", Controller:(*bool)(0xc004b5f636), BlockOwnerDeletion:(*bool)(0xc004b5f637)}} +Aug 24 12:26:03.676: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"b2429a34-9311-4b94-8f72-a96e802fab10", Controller:(*bool)(0xc004b5f8aa), BlockOwnerDeletion:(*bool)(0xc004b5f8ab)}} +Aug 24 12:26:03.690: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"a6d24715-ca3a-451a-83cf-424c9b14515c", Controller:(*bool)(0xc0050a6446), BlockOwnerDeletion:(*bool)(0xc0050a6447)}} +[AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 -Jul 29 16:19:04.138: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Kubelet +Aug 24 12:26:08.713: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 -STEP: Destroying namespace "kubelet-test-8330" for this suite. 
07/29/23 16:19:04.148 +STEP: Destroying namespace "gc-4499" for this suite. 08/24/23 12:26:08.726 ------------------------------ -• [4.099 seconds] -[sig-node] Kubelet -test/e2e/common/node/framework.go:23 - when scheduling a busybox command that always fails in a pod - test/e2e/common/node/kubelet.go:82 - should have an terminated reason [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:110 +• [SLOW TEST] [5.226 seconds] +[sig-api-machinery] Garbage collector +test/e2e/apimachinery/framework.go:23 + should not be blocked by dependency circle [Conformance] + test/e2e/apimachinery/garbage_collector.go:849 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Kubelet + [BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:19:00.066 - Jul 29 16:19:00.066: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubelet-test 07/29/23 16:19:00.067 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:00.095 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:00.101 - [BeforeEach] [sig-node] Kubelet + STEP: Creating a kubernetes client 08/24/23 12:26:03.512 + Aug 24 12:26:03.512: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename gc 08/24/23 12:26:03.514 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:03.549 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:03.553 + [BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 - [BeforeEach] when scheduling a busybox command that always fails in a pod - test/e2e/common/node/kubelet.go:85 - [It] should have an terminated reason [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:110 - [AfterEach] [sig-node] Kubelet + [It] should not be blocked by dependency circle [Conformance] + test/e2e/apimachinery/garbage_collector.go:849 + Aug 24 12:26:03.655: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"e6987ead-b79a-4cb2-9887-aa11a57d82b6", Controller:(*bool)(0xc004b5f636), BlockOwnerDeletion:(*bool)(0xc004b5f637)}} + Aug 24 12:26:03.676: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"b2429a34-9311-4b94-8f72-a96e802fab10", Controller:(*bool)(0xc004b5f8aa), BlockOwnerDeletion:(*bool)(0xc004b5f8ab)}} + Aug 24 12:26:03.690: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"a6d24715-ca3a-451a-83cf-424c9b14515c", Controller:(*bool)(0xc0050a6446), BlockOwnerDeletion:(*bool)(0xc0050a6447)}} + [AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 - Jul 29 16:19:04.138: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Kubelet + Aug 24 12:26:08.713: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup 
(Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 - STEP: Destroying namespace "kubelet-test-8330" for this suite. 07/29/23 16:19:04.148 + STEP: Destroying namespace "gc-4499" for this suite. 08/24/23 12:26:08.726 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-network] DNS - should provide DNS for the cluster [Conformance] - test/e2e/network/dns.go:50 -[BeforeEach] [sig-network] DNS +[sig-api-machinery] server version + should find the server version [Conformance] + test/e2e/apimachinery/server_version.go:39 +[BeforeEach] [sig-api-machinery] server version set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:19:04.171 -Jul 29 16:19:04.172: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename dns 07/29/23 16:19:04.173 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:04.215 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:04.22 -[BeforeEach] [sig-network] DNS +STEP: Creating a kubernetes client 08/24/23 12:26:08.74 +Aug 24 12:26:08.740: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename server-version 08/24/23 12:26:08.743 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:08.776 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:08.781 +[BeforeEach] [sig-api-machinery] server version test/e2e/framework/metrics/init/init.go:31 -[It] should provide DNS for the cluster [Conformance] - test/e2e/network/dns.go:50 -STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;sleep 1; done - 07/29/23 16:19:04.223 -STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;sleep 1; done - 07/29/23 16:19:04.223 -STEP: creating a pod to probe DNS 07/29/23 16:19:04.223 -STEP: submitting the pod to kubernetes 07/29/23 16:19:04.224 -Jul 29 16:19:04.240: INFO: Waiting up to 15m0s for pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc" in namespace "dns-6385" to be "running" -Jul 29 16:19:04.247: INFO: Pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc": Phase="Pending", Reason="", readiness=false. Elapsed: 6.567785ms -Jul 29 16:19:06.254: INFO: Pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013588305s -Jul 29 16:19:08.262: INFO: Pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.021134071s -Jul 29 16:19:08.262: INFO: Pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc" satisfied condition "running" -STEP: retrieving the pod 07/29/23 16:19:08.262 -STEP: looking for the results for each expected name from probers 07/29/23 16:19:08.272 -Jul 29 16:19:08.323: INFO: DNS probes using dns-6385/dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc succeeded - -STEP: deleting the pod 07/29/23 16:19:08.323 -[AfterEach] [sig-network] DNS +[It] should find the server version [Conformance] + test/e2e/apimachinery/server_version.go:39 +STEP: Request ServerVersion 08/24/23 12:26:08.785 +STEP: Confirm major version 08/24/23 12:26:08.786 +Aug 24 12:26:08.787: INFO: Major version: 1 +STEP: Confirm minor version 08/24/23 12:26:08.787 +Aug 24 12:26:08.787: INFO: cleanMinorVersion: 26 +Aug 24 12:26:08.787: INFO: Minor version: 26 +[AfterEach] [sig-api-machinery] server version test/e2e/framework/node/init/init.go:32 -Jul 29 16:19:08.347: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] DNS +Aug 24 12:26:08.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] server version test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-api-machinery] server version dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] DNS +[DeferCleanup (Each)] [sig-api-machinery] server version tear down framework | framework.go:193 -STEP: Destroying namespace "dns-6385" for this suite. 07/29/23 16:19:08.358 +STEP: Destroying namespace "server-version-6323" for this suite. 08/24/23 12:26:08.796 ------------------------------ -• [4.205 seconds] -[sig-network] DNS -test/e2e/network/common/framework.go:23 - should provide DNS for the cluster [Conformance] - test/e2e/network/dns.go:50 +• [0.069 seconds] +[sig-api-machinery] server version +test/e2e/apimachinery/framework.go:23 + should find the server version [Conformance] + test/e2e/apimachinery/server_version.go:39 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] DNS + [BeforeEach] [sig-api-machinery] server version set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:19:04.171 - Jul 29 16:19:04.172: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename dns 07/29/23 16:19:04.173 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:04.215 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:04.22 - [BeforeEach] [sig-network] DNS + STEP: Creating a kubernetes client 08/24/23 12:26:08.74 + Aug 24 12:26:08.740: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename server-version 08/24/23 12:26:08.743 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:08.776 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:08.781 + [BeforeEach] [sig-api-machinery] server version test/e2e/framework/metrics/init/init.go:31 - [It] should provide DNS for the cluster [Conformance] - test/e2e/network/dns.go:50 - STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && 
test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;sleep 1; done - 07/29/23 16:19:04.223 - STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;sleep 1; done - 07/29/23 16:19:04.223 - STEP: creating a pod to probe DNS 07/29/23 16:19:04.223 - STEP: submitting the pod to kubernetes 07/29/23 16:19:04.224 - Jul 29 16:19:04.240: INFO: Waiting up to 15m0s for pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc" in namespace "dns-6385" to be "running" - Jul 29 16:19:04.247: INFO: Pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc": Phase="Pending", Reason="", readiness=false. Elapsed: 6.567785ms - Jul 29 16:19:06.254: INFO: Pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013588305s - Jul 29 16:19:08.262: INFO: Pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc": Phase="Running", Reason="", readiness=true. Elapsed: 4.021134071s - Jul 29 16:19:08.262: INFO: Pod "dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc" satisfied condition "running" - STEP: retrieving the pod 07/29/23 16:19:08.262 - STEP: looking for the results for each expected name from probers 07/29/23 16:19:08.272 - Jul 29 16:19:08.323: INFO: DNS probes using dns-6385/dns-test-b783a3f1-a651-4e18-b18d-a1b1f0dcdebc succeeded - - STEP: deleting the pod 07/29/23 16:19:08.323 - [AfterEach] [sig-network] DNS + [It] should find the server version [Conformance] + test/e2e/apimachinery/server_version.go:39 + STEP: Request ServerVersion 08/24/23 12:26:08.785 + STEP: Confirm major version 08/24/23 12:26:08.786 + Aug 24 12:26:08.787: INFO: Major version: 1 + STEP: Confirm minor version 08/24/23 12:26:08.787 + Aug 24 12:26:08.787: INFO: cleanMinorVersion: 26 + Aug 24 12:26:08.787: INFO: Minor version: 26 + [AfterEach] [sig-api-machinery] server version test/e2e/framework/node/init/init.go:32 - Jul 29 16:19:08.347: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] DNS + Aug 24 12:26:08.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] server version test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] DNS + [DeferCleanup (Each)] [sig-api-machinery] server version dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] DNS + [DeferCleanup (Each)] [sig-api-machinery] server version tear down framework | framework.go:193 - STEP: Destroying namespace "dns-6385" for this suite. 07/29/23 16:19:08.358 + STEP: Destroying namespace "server-version-6323" for this suite. 
08/24/23 12:26:08.796 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-node] RuntimeClass - should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:156 -[BeforeEach] [sig-node] RuntimeClass +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:99 +[BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:19:08.379 -Jul 29 16:19:08.379: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename runtimeclass 07/29/23 16:19:08.381 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:08.415 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:08.423 -[BeforeEach] [sig-node] RuntimeClass +STEP: Creating a kubernetes client 08/24/23 12:26:08.811 +Aug 24 12:26:08.811: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:26:08.814 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:08.846 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:08.85 +[BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 -[It] should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:156 -STEP: Deleting RuntimeClass runtimeclass-9828-delete-me 07/29/23 16:19:08.442 -STEP: Waiting for the RuntimeClass to disappear 07/29/23 16:19:08.453 -[AfterEach] [sig-node] RuntimeClass +[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:99 +STEP: Creating configMap with name projected-configmap-test-volume-map-a7a01ee3-e5fc-4b22-8f0b-36d967a24c6b 08/24/23 12:26:08.853 +STEP: Creating a pod to test consume configMaps 08/24/23 12:26:08.861 +Aug 24 12:26:08.875: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1" in namespace "projected-7089" to be "Succeeded or Failed" +Aug 24 12:26:08.881: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.636592ms +Aug 24 12:26:10.890: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01542597s +Aug 24 12:26:12.890: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1": Phase="Pending", Reason="", readiness=false. Elapsed: 4.015327567s +Aug 24 12:26:14.891: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.016604244s +STEP: Saw pod success 08/24/23 12:26:14.892 +Aug 24 12:26:14.892: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1" satisfied condition "Succeeded or Failed" +Aug 24 12:26:14.897: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1 container agnhost-container: +STEP: delete the pod 08/24/23 12:26:14.925 +Aug 24 12:26:14.947: INFO: Waiting for pod pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1 to disappear +Aug 24 12:26:14.952: INFO: Pod pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1 no longer exists +[AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:19:08.468: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] RuntimeClass +Aug 24 12:26:14.952: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] RuntimeClass +[DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] RuntimeClass +[DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 -STEP: Destroying namespace "runtimeclass-9828" for this suite. 07/29/23 16:19:08.474 +STEP: Destroying namespace "projected-7089" for this suite. 08/24/23 12:26:14.96 ------------------------------ -• [0.106 seconds] -[sig-node] RuntimeClass -test/e2e/common/node/framework.go:23 - should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:156 +• [SLOW TEST] [6.162 seconds] +[sig-storage] Projected configMap +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:99 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] RuntimeClass + [BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:19:08.379 - Jul 29 16:19:08.379: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename runtimeclass 07/29/23 16:19:08.381 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:08.415 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:08.423 - [BeforeEach] [sig-node] RuntimeClass + STEP: Creating a kubernetes client 08/24/23 12:26:08.811 + Aug 24 12:26:08.811: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:26:08.814 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:08.846 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:08.85 + [BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 - [It] should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:156 - STEP: Deleting RuntimeClass runtimeclass-9828-delete-me 07/29/23 16:19:08.442 - STEP: Waiting for the RuntimeClass to disappear 07/29/23 16:19:08.453 - [AfterEach] [sig-node] RuntimeClass + [It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] 
[Conformance] + test/e2e/common/storage/projected_configmap.go:99 + STEP: Creating configMap with name projected-configmap-test-volume-map-a7a01ee3-e5fc-4b22-8f0b-36d967a24c6b 08/24/23 12:26:08.853 + STEP: Creating a pod to test consume configMaps 08/24/23 12:26:08.861 + Aug 24 12:26:08.875: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1" in namespace "projected-7089" to be "Succeeded or Failed" + Aug 24 12:26:08.881: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.636592ms + Aug 24 12:26:10.890: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01542597s + Aug 24 12:26:12.890: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1": Phase="Pending", Reason="", readiness=false. Elapsed: 4.015327567s + Aug 24 12:26:14.891: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.016604244s + STEP: Saw pod success 08/24/23 12:26:14.892 + Aug 24 12:26:14.892: INFO: Pod "pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1" satisfied condition "Succeeded or Failed" + Aug 24 12:26:14.897: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1 container agnhost-container: + STEP: delete the pod 08/24/23 12:26:14.925 + Aug 24 12:26:14.947: INFO: Waiting for pod pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1 to disappear + Aug 24 12:26:14.952: INFO: Pod pod-projected-configmaps-1084ffcb-adfb-45c2-87a7-b3ecaf8d47c1 no longer exists + [AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:19:08.468: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] RuntimeClass + Aug 24 12:26:14.952: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 - STEP: Destroying namespace "runtimeclass-9828" for this suite. 07/29/23 16:19:08.474 + STEP: Destroying namespace "projected-7089" for this suite. 
08/24/23 12:26:14.96 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Secrets - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:57 -[BeforeEach] [sig-storage] Secrets +[sig-node] Pods + should contain environment variables for services [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:444 +[BeforeEach] [sig-node] Pods set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:19:08.487 -Jul 29 16:19:08.488: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename secrets 07/29/23 16:19:08.49 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:08.52 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:08.526 -[BeforeEach] [sig-storage] Secrets +STEP: Creating a kubernetes client 08/24/23 12:26:14.978 +Aug 24 12:26:14.979: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 12:26:14.98 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:15.017 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:15.023 +[BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:57 -STEP: Creating secret with name secret-test-3e62c266-1718-40f4-955e-b4ca0fc56984 07/29/23 16:19:08.532 -STEP: Creating a pod to test consume secrets 07/29/23 16:19:08.546 -Jul 29 16:19:08.560: INFO: Waiting up to 5m0s for pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929" in namespace "secrets-5068" to be "Succeeded or Failed" -Jul 29 16:19:08.570: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929": Phase="Pending", Reason="", readiness=false. Elapsed: 8.039665ms -Jul 29 16:19:10.577: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929": Phase="Running", Reason="", readiness=true. Elapsed: 2.01695885s -Jul 29 16:19:12.579: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929": Phase="Running", Reason="", readiness=false. Elapsed: 4.018669747s -Jul 29 16:19:14.579: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.018311489s -STEP: Saw pod success 07/29/23 16:19:14.579 -Jul 29 16:19:14.579: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929" satisfied condition "Succeeded or Failed" -Jul 29 16:19:14.585: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929 container secret-volume-test: -STEP: delete the pod 07/29/23 16:19:14.597 -Jul 29 16:19:14.615: INFO: Waiting for pod pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929 to disappear -Jul 29 16:19:14.621: INFO: Pod pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929 no longer exists -[AfterEach] [sig-storage] Secrets +[BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 +[It] should contain environment variables for services [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:444 +Aug 24 12:26:15.084: INFO: Waiting up to 5m0s for pod "server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b" in namespace "pods-2016" to be "running and ready" +Aug 24 12:26:15.095: INFO: Pod "server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b": Phase="Pending", Reason="", readiness=false. Elapsed: 11.549729ms +Aug 24 12:26:15.095: INFO: The phase of Pod server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:26:17.104: INFO: Pod "server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b": Phase="Running", Reason="", readiness=true. Elapsed: 2.020221792s +Aug 24 12:26:17.104: INFO: The phase of Pod server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b is Running (Ready = true) +Aug 24 12:26:17.104: INFO: Pod "server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b" satisfied condition "running and ready" +Aug 24 12:26:17.146: INFO: Waiting up to 5m0s for pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8" in namespace "pods-2016" to be "Succeeded or Failed" +Aug 24 12:26:17.154: INFO: Pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8": Phase="Pending", Reason="", readiness=false. Elapsed: 7.73876ms +Aug 24 12:26:19.160: INFO: Pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014470863s +Aug 24 12:26:21.161: INFO: Pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.014864123s +STEP: Saw pod success 08/24/23 12:26:21.161 +Aug 24 12:26:21.162: INFO: Pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8" satisfied condition "Succeeded or Failed" +Aug 24 12:26:21.168: INFO: Trying to get logs from node pe9deep4seen-3 pod client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8 container env3cont: +STEP: delete the pod 08/24/23 12:26:21.18 +Aug 24 12:26:21.198: INFO: Waiting for pod client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8 to disappear +Aug 24 12:26:21.203: INFO: Pod client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8 no longer exists +[AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 -Jul 29 16:19:14.621: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Secrets +Aug 24 12:26:21.204: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-5068" for this suite. 
07/29/23 16:19:14.631 +STEP: Destroying namespace "pods-2016" for this suite. 08/24/23 12:26:21.211 ------------------------------ -• [SLOW TEST] [6.156 seconds] -[sig-storage] Secrets -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:57 +• [SLOW TEST] [6.244 seconds] +[sig-node] Pods +test/e2e/common/node/framework.go:23 + should contain environment variables for services [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:444 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Secrets + [BeforeEach] [sig-node] Pods set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:19:08.487 - Jul 29 16:19:08.488: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 16:19:08.49 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:08.52 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:08.526 - [BeforeEach] [sig-storage] Secrets + STEP: Creating a kubernetes client 08/24/23 12:26:14.978 + Aug 24 12:26:14.979: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 12:26:14.98 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:15.017 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:15.023 + [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:57 - STEP: Creating secret with name secret-test-3e62c266-1718-40f4-955e-b4ca0fc56984 07/29/23 16:19:08.532 - STEP: Creating a pod to test consume secrets 07/29/23 16:19:08.546 - Jul 29 16:19:08.560: INFO: Waiting up to 5m0s for pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929" in namespace "secrets-5068" to be "Succeeded or Failed" - Jul 29 16:19:08.570: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929": Phase="Pending", Reason="", readiness=false. Elapsed: 8.039665ms - Jul 29 16:19:10.577: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929": Phase="Running", Reason="", readiness=true. Elapsed: 2.01695885s - Jul 29 16:19:12.579: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929": Phase="Running", Reason="", readiness=false. Elapsed: 4.018669747s - Jul 29 16:19:14.579: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.018311489s - STEP: Saw pod success 07/29/23 16:19:14.579 - Jul 29 16:19:14.579: INFO: Pod "pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929" satisfied condition "Succeeded or Failed" - Jul 29 16:19:14.585: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929 container secret-volume-test: - STEP: delete the pod 07/29/23 16:19:14.597 - Jul 29 16:19:14.615: INFO: Waiting for pod pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929 to disappear - Jul 29 16:19:14.621: INFO: Pod pod-secrets-ca7fbcc9-63ca-406e-b5c1-70045f355929 no longer exists - [AfterEach] [sig-storage] Secrets + [BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 + [It] should contain environment variables for services [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:444 + Aug 24 12:26:15.084: INFO: Waiting up to 5m0s for pod "server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b" in namespace "pods-2016" to be "running and ready" + Aug 24 12:26:15.095: INFO: Pod "server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b": Phase="Pending", Reason="", readiness=false. Elapsed: 11.549729ms + Aug 24 12:26:15.095: INFO: The phase of Pod server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:26:17.104: INFO: Pod "server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b": Phase="Running", Reason="", readiness=true. Elapsed: 2.020221792s + Aug 24 12:26:17.104: INFO: The phase of Pod server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b is Running (Ready = true) + Aug 24 12:26:17.104: INFO: Pod "server-envvars-cef2a872-5945-4e63-9889-a6846f7be56b" satisfied condition "running and ready" + Aug 24 12:26:17.146: INFO: Waiting up to 5m0s for pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8" in namespace "pods-2016" to be "Succeeded or Failed" + Aug 24 12:26:17.154: INFO: Pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8": Phase="Pending", Reason="", readiness=false. Elapsed: 7.73876ms + Aug 24 12:26:19.160: INFO: Pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014470863s + Aug 24 12:26:21.161: INFO: Pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014864123s + STEP: Saw pod success 08/24/23 12:26:21.161 + Aug 24 12:26:21.162: INFO: Pod "client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8" satisfied condition "Succeeded or Failed" + Aug 24 12:26:21.168: INFO: Trying to get logs from node pe9deep4seen-3 pod client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8 container env3cont: + STEP: delete the pod 08/24/23 12:26:21.18 + Aug 24 12:26:21.198: INFO: Waiting for pod client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8 to disappear + Aug 24 12:26:21.203: INFO: Pod client-envvars-d706041e-3eb4-438d-b509-92843f3f8fc8 no longer exists + [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 - Jul 29 16:19:14.621: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Secrets + Aug 24 12:26:21.204: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-5068" for this suite. 07/29/23 16:19:14.631 + STEP: Destroying namespace "pods-2016" for this suite. 08/24/23 12:26:21.211 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] - Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] - test/e2e/apps/statefulset.go:587 + Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + test/e2e/apps/statefulset.go:697 [BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:19:14.644 -Jul 29 16:19:14.645: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename statefulset 07/29/23 16:19:14.648 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:14.683 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:14.688 +STEP: Creating a kubernetes client 08/24/23 12:26:21.226 +Aug 24 12:26:21.226: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename statefulset 08/24/23 12:26:21.229 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:21.26 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:21.265 [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-apps] StatefulSet test/e2e/apps/statefulset.go:98 [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:113 -STEP: Creating service test in namespace statefulset-405 07/29/23 16:19:14.696 -[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] - test/e2e/apps/statefulset.go:587 -STEP: Initializing watcher for selector baz=blah,foo=bar 07/29/23 16:19:14.709 -STEP: Creating stateful set ss in namespace statefulset-405 07/29/23 16:19:14.719 -STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-405 07/29/23 16:19:14.733 -Jul 29 16:19:14.742: INFO: Found 0 stateful pods, waiting for 1 -Jul 29 16:19:24.747: INFO: 
Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true -STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod 07/29/23 16:19:24.747 -Jul 29 16:19:24.752: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 16:19:25.071: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 16:19:25.071: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 16:19:25.071: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Jul 29 16:19:25.081: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true -Jul 29 16:19:35.089: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false -Jul 29 16:19:35.089: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 16:19:35.118: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999465s -Jul 29 16:19:36.127: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.99435387s -Jul 29 16:19:37.138: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.985491003s -Jul 29 16:19:38.147: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.973451748s -Jul 29 16:19:39.159: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.965129632s -Jul 29 16:19:40.172: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.95275896s -Jul 29 16:19:41.182: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.940013847s -Jul 29 16:19:42.192: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.92949524s -Jul 29 16:19:43.199: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.920290048s -Jul 29 16:19:44.212: INFO: Verifying statefulset ss doesn't scale past 1 for another 913.382396ms -STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-405 07/29/23 16:19:45.213 -Jul 29 16:19:45.222: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Jul 29 16:19:45.507: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Jul 29 16:19:45.507: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Jul 29 16:19:45.508: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Jul 29 16:19:45.516: INFO: Found 1 stateful pods, waiting for 3 -Jul 29 16:19:55.529: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 16:19:55.529: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 16:19:55.530: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true -STEP: Verifying that stateful set ss was scaled up in order 07/29/23 16:19:55.53 -STEP: Scale down will halt with unhealthy stateful pod 07/29/23 16:19:55.53 -Jul 29 16:19:55.546: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 
16:19:55.867: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 16:19:55.867: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 16:19:55.867: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Jul 29 16:19:55.867: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 16:19:56.173: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 16:19:56.174: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 16:19:56.174: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Jul 29 16:19:56.175: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 16:19:56.472: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 16:19:56.472: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 16:19:56.472: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Jul 29 16:19:56.472: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 16:19:56.478: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 -Jul 29 16:20:06.495: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false -Jul 29 16:20:06.496: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false -Jul 29 16:20:06.496: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false -Jul 29 16:20:06.528: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999354s -Jul 29 16:20:07.539: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.986807427s -Jul 29 16:20:08.552: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.975982222s -Jul 29 16:20:09.564: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.962758016s -Jul 29 16:20:10.575: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.951336669s -Jul 29 16:20:11.587: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.939848597s -Jul 29 16:20:12.599: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.927997514s -Jul 29 16:20:13.612: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.915624893s -Jul 29 16:20:14.620: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.903380252s -Jul 29 16:20:15.633: INFO: Verifying statefulset ss doesn't scale past 3 for another 894.970653ms -STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-405 07/29/23 16:20:16.634 -Jul 29 16:20:16.648: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Jul 29 16:20:16.907: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Jul 29 16:20:16.907: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Jul 29 
16:20:16.907: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Jul 29 16:20:16.908: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Jul 29 16:20:17.150: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Jul 29 16:20:17.150: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Jul 29 16:20:17.150: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Jul 29 16:20:17.150: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Jul 29 16:20:17.451: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Jul 29 16:20:17.451: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Jul 29 16:20:17.451: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Jul 29 16:20:17.451: INFO: Scaling statefulset ss to 0 -STEP: Verifying that stateful set ss was scaled down in reverse order 07/29/23 16:20:27.488 +STEP: Creating service test in namespace statefulset-2689 08/24/23 12:26:21.271 +[It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + test/e2e/apps/statefulset.go:697 +STEP: Creating stateful set ss in namespace statefulset-2689 08/24/23 12:26:21.284 +STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-2689 08/24/23 12:26:21.301 +Aug 24 12:26:21.308: INFO: Found 0 stateful pods, waiting for 1 +Aug 24 12:26:31.328: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod 08/24/23 12:26:31.329 +Aug 24 12:26:31.343: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 12:26:31.682: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 12:26:31.682: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 12:26:31.682: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Aug 24 12:26:31.690: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +Aug 24 12:26:41.706: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Aug 24 12:26:41.712: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 12:26:41.751: INFO: POD NODE PHASE GRACE CONDITIONS +Aug 24 12:26:41.751: INFO: ss-0 pe9deep4seen-3 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:31 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:31 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 
2023-08-24 12:26:21 +0000 UTC }] +Aug 24 12:26:41.752: INFO: +Aug 24 12:26:41.752: INFO: StatefulSet ss has not reached scale 3, at 1 +Aug 24 12:26:42.762: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.985040937s +Aug 24 12:26:43.773: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.97447662s +Aug 24 12:26:44.781: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.965122027s +Aug 24 12:26:45.791: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.956670854s +Aug 24 12:26:46.802: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.946226078s +Aug 24 12:26:47.812: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.936287521s +Aug 24 12:26:48.821: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.925969662s +Aug 24 12:26:49.830: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.91662521s +Aug 24 12:26:50.841: INFO: Verifying statefulset ss doesn't scale past 3 for another 907.488408ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-2689 08/24/23 12:26:51.842 +Aug 24 12:26:51.851: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Aug 24 12:26:52.153: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Aug 24 12:26:52.153: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Aug 24 12:26:52.153: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Aug 24 12:26:52.153: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Aug 24 12:26:52.397: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" +Aug 24 12:26:52.397: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Aug 24 12:26:52.397: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Aug 24 12:26:52.397: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Aug 24 12:26:52.700: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" +Aug 24 12:26:52.700: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Aug 24 12:26:52.700: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Aug 24 12:26:52.708: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false +Aug 24 12:27:02.720: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 12:27:02.720: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 12:27:02.720: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Scale down will not halt with unhealthy stateful pod 08/24/23 12:27:02.72 +Aug 
24 12:27:02.728: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 12:27:03.089: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 12:27:03.089: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 12:27:03.089: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Aug 24 12:27:03.090: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 12:27:03.446: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 12:27:03.446: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 12:27:03.446: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Aug 24 12:27:03.447: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 12:27:03.751: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 12:27:03.751: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 12:27:03.751: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Aug 24 12:27:03.751: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 12:27:03.757: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 +Aug 24 12:27:13.775: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Aug 24 12:27:13.776: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +Aug 24 12:27:13.776: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +Aug 24 12:27:13.806: INFO: POD NODE PHASE GRACE CONDITIONS +Aug 24 12:27:13.806: INFO: ss-0 pe9deep4seen-3 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:03 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:03 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC }] +Aug 24 12:27:13.807: INFO: ss-1 pe9deep4seen-2 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC }] +Aug 24 12:27:13.807: INFO: ss-2 pe9deep4seen-1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: 
[webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC }] +Aug 24 12:27:13.807: INFO: +Aug 24 12:27:13.807: INFO: StatefulSet ss has not reached scale 0, at 3 +Aug 24 12:27:14.813: INFO: POD NODE PHASE GRACE CONDITIONS +Aug 24 12:27:14.814: INFO: ss-0 pe9deep4seen-3 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:03 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:03 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC }] +Aug 24 12:27:14.814: INFO: ss-1 pe9deep4seen-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC }] +Aug 24 12:27:14.814: INFO: +Aug 24 12:27:14.814: INFO: StatefulSet ss has not reached scale 0, at 2 +Aug 24 12:27:15.822: INFO: Verifying statefulset ss doesn't scale past 0 for another 7.983616254s +Aug 24 12:27:16.828: INFO: Verifying statefulset ss doesn't scale past 0 for another 6.975798193s +Aug 24 12:27:17.836: INFO: Verifying statefulset ss doesn't scale past 0 for another 5.969662968s +Aug 24 12:27:18.842: INFO: Verifying statefulset ss doesn't scale past 0 for another 4.961710368s +Aug 24 12:27:19.849: INFO: Verifying statefulset ss doesn't scale past 0 for another 3.955257273s +Aug 24 12:27:20.856: INFO: Verifying statefulset ss doesn't scale past 0 for another 2.94885905s +Aug 24 12:27:21.862: INFO: Verifying statefulset ss doesn't scale past 0 for another 1.941384868s +Aug 24 12:27:22.870: INFO: Verifying statefulset ss doesn't scale past 0 for another 935.945647ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-2689 08/24/23 12:27:23.87 +Aug 24 12:27:23.878: INFO: Scaling statefulset ss to 0 +Aug 24 12:27:23.898: INFO: Waiting for statefulset status.replicas updated to 0 [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:124 -Jul 29 16:20:27.489: INFO: Deleting all statefulset in ns statefulset-405 -Jul 29 16:20:27.496: INFO: Scaling statefulset ss to 0 -Jul 29 16:20:27.529: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 16:20:27.538: INFO: Deleting statefulset ss +Aug 24 12:27:23.904: INFO: Deleting all statefulset in ns statefulset-2689 +Aug 24 12:27:23.909: INFO: Scaling statefulset ss to 0 +Aug 24 12:27:23.928: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 12:27:23.932: INFO: Deleting statefulset ss [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 -Jul 29 16:20:27.570: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:27:23.958: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 [DeferCleanup 
(Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 -STEP: Destroying namespace "statefulset-405" for this suite. 07/29/23 16:20:27.583 +STEP: Destroying namespace "statefulset-2689" for this suite. 08/24/23 12:27:23.97 ------------------------------ -• [SLOW TEST] [72.956 seconds] +• [SLOW TEST] [62.758 seconds] [sig-apps] StatefulSet test/e2e/apps/framework.go:23 Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:103 - Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] - test/e2e/apps/statefulset.go:587 + Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + test/e2e/apps/statefulset.go:697 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:19:14.644 - Jul 29 16:19:14.645: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename statefulset 07/29/23 16:19:14.648 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:19:14.683 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:19:14.688 + STEP: Creating a kubernetes client 08/24/23 12:26:21.226 + Aug 24 12:26:21.226: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename statefulset 08/24/23 12:26:21.229 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:26:21.26 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:26:21.265 [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-apps] StatefulSet test/e2e/apps/statefulset.go:98 [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:113 - STEP: Creating service test in namespace statefulset-405 07/29/23 16:19:14.696 - [It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] - test/e2e/apps/statefulset.go:587 - STEP: Initializing watcher for selector baz=blah,foo=bar 07/29/23 16:19:14.709 - STEP: Creating stateful set ss in namespace statefulset-405 07/29/23 16:19:14.719 - STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-405 07/29/23 16:19:14.733 - Jul 29 16:19:14.742: INFO: Found 0 stateful pods, waiting for 1 - Jul 29 16:19:24.747: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true - STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod 07/29/23 16:19:24.747 - Jul 29 16:19:24.752: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 16:19:25.071: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 16:19:25.071: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 16:19:25.071: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - Jul 29 16:19:25.081: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true - Jul 29 16:19:35.089: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently 
Running - Ready=false - Jul 29 16:19:35.089: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 16:19:35.118: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999465s - Jul 29 16:19:36.127: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.99435387s - Jul 29 16:19:37.138: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.985491003s - Jul 29 16:19:38.147: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.973451748s - Jul 29 16:19:39.159: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.965129632s - Jul 29 16:19:40.172: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.95275896s - Jul 29 16:19:41.182: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.940013847s - Jul 29 16:19:42.192: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.92949524s - Jul 29 16:19:43.199: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.920290048s - Jul 29 16:19:44.212: INFO: Verifying statefulset ss doesn't scale past 1 for another 913.382396ms - STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-405 07/29/23 16:19:45.213 - Jul 29 16:19:45.222: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' - Jul 29 16:19:45.507: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" - Jul 29 16:19:45.507: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" - Jul 29 16:19:45.508: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - - Jul 29 16:19:45.516: INFO: Found 1 stateful pods, waiting for 3 - Jul 29 16:19:55.529: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 16:19:55.529: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 16:19:55.530: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true - STEP: Verifying that stateful set ss was scaled up in order 07/29/23 16:19:55.53 - STEP: Scale down will halt with unhealthy stateful pod 07/29/23 16:19:55.53 - Jul 29 16:19:55.546: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 16:19:55.867: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 16:19:55.867: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 16:19:55.867: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - Jul 29 16:19:55.867: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 16:19:56.173: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 16:19:56.174: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 16:19:56.174: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - Jul 29 16:19:56.175: 
INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 16:19:56.472: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 16:19:56.472: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 16:19:56.472: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - Jul 29 16:19:56.472: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 16:19:56.478: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 - Jul 29 16:20:06.495: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false - Jul 29 16:20:06.496: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false - Jul 29 16:20:06.496: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false - Jul 29 16:20:06.528: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999354s - Jul 29 16:20:07.539: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.986807427s - Jul 29 16:20:08.552: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.975982222s - Jul 29 16:20:09.564: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.962758016s - Jul 29 16:20:10.575: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.951336669s - Jul 29 16:20:11.587: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.939848597s - Jul 29 16:20:12.599: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.927997514s - Jul 29 16:20:13.612: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.915624893s - Jul 29 16:20:14.620: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.903380252s - Jul 29 16:20:15.633: INFO: Verifying statefulset ss doesn't scale past 3 for another 894.970653ms - STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-405 07/29/23 16:20:16.634 - Jul 29 16:20:16.648: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' - Jul 29 16:20:16.907: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" - Jul 29 16:20:16.907: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" - Jul 29 16:20:16.907: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - - Jul 29 16:20:16.908: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' - Jul 29 16:20:17.150: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" - Jul 29 16:20:17.150: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" - Jul 29 16:20:17.150: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - - Jul 29 16:20:17.150: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-405 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html 
/usr/local/apache2/htdocs/ || true' - Jul 29 16:20:17.451: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" - Jul 29 16:20:17.451: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" - Jul 29 16:20:17.451: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - - Jul 29 16:20:17.451: INFO: Scaling statefulset ss to 0 - STEP: Verifying that stateful set ss was scaled down in reverse order 07/29/23 16:20:27.488 + STEP: Creating service test in namespace statefulset-2689 08/24/23 12:26:21.271 + [It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + test/e2e/apps/statefulset.go:697 + STEP: Creating stateful set ss in namespace statefulset-2689 08/24/23 12:26:21.284 + STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-2689 08/24/23 12:26:21.301 + Aug 24 12:26:21.308: INFO: Found 0 stateful pods, waiting for 1 + Aug 24 12:26:31.328: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true + STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod 08/24/23 12:26:31.329 + Aug 24 12:26:31.343: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 12:26:31.682: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 12:26:31.682: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 12:26:31.682: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + Aug 24 12:26:31.690: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true + Aug 24 12:26:41.706: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false + Aug 24 12:26:41.712: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 12:26:41.751: INFO: POD NODE PHASE GRACE CONDITIONS + Aug 24 12:26:41.751: INFO: ss-0 pe9deep4seen-3 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:31 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:31 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC }] + Aug 24 12:26:41.752: INFO: + Aug 24 12:26:41.752: INFO: StatefulSet ss has not reached scale 3, at 1 + Aug 24 12:26:42.762: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.985040937s + Aug 24 12:26:43.773: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.97447662s + Aug 24 12:26:44.781: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.965122027s + Aug 24 12:26:45.791: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.956670854s + Aug 24 12:26:46.802: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.946226078s + Aug 24 12:26:47.812: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.936287521s + Aug 24 12:26:48.821: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.925969662s + Aug 24 12:26:49.830: INFO: Verifying statefulset ss 
doesn't scale past 3 for another 1.91662521s + Aug 24 12:26:50.841: INFO: Verifying statefulset ss doesn't scale past 3 for another 907.488408ms + STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-2689 08/24/23 12:26:51.842 + Aug 24 12:26:51.851: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' + Aug 24 12:26:52.153: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" + Aug 24 12:26:52.153: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" + Aug 24 12:26:52.153: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + + Aug 24 12:26:52.153: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' + Aug 24 12:26:52.397: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" + Aug 24 12:26:52.397: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" + Aug 24 12:26:52.397: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + + Aug 24 12:26:52.397: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' + Aug 24 12:26:52.700: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" + Aug 24 12:26:52.700: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" + Aug 24 12:26:52.700: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + + Aug 24 12:26:52.708: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false + Aug 24 12:27:02.720: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 12:27:02.720: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 12:27:02.720: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true + STEP: Scale down will not halt with unhealthy stateful pod 08/24/23 12:27:02.72 + Aug 24 12:27:02.728: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 12:27:03.089: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 12:27:03.089: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 12:27:03.089: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + Aug 24 12:27:03.090: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 12:27:03.446: INFO: stderr: "+ mv -v 
/usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 12:27:03.446: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 12:27:03.446: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + Aug 24 12:27:03.447: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-2689 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 12:27:03.751: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 12:27:03.751: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 12:27:03.751: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + Aug 24 12:27:03.751: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 12:27:03.757: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 + Aug 24 12:27:13.775: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false + Aug 24 12:27:13.776: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false + Aug 24 12:27:13.776: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false + Aug 24 12:27:13.806: INFO: POD NODE PHASE GRACE CONDITIONS + Aug 24 12:27:13.806: INFO: ss-0 pe9deep4seen-3 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:03 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:03 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC }] + Aug 24 12:27:13.807: INFO: ss-1 pe9deep4seen-2 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC }] + Aug 24 12:27:13.807: INFO: ss-2 pe9deep4seen-1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC }] + Aug 24 12:27:13.807: INFO: + Aug 24 12:27:13.807: INFO: StatefulSet ss has not reached scale 0, at 3 + Aug 24 12:27:14.813: INFO: POD NODE PHASE GRACE CONDITIONS + Aug 24 12:27:14.814: INFO: ss-0 pe9deep4seen-3 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:03 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:03 +0000 UTC ContainersNotReady containers with unready 
status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:21 +0000 UTC }] + Aug 24 12:27:14.814: INFO: ss-1 pe9deep4seen-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:27:04 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-08-24 12:26:41 +0000 UTC }] + Aug 24 12:27:14.814: INFO: + Aug 24 12:27:14.814: INFO: StatefulSet ss has not reached scale 0, at 2 + Aug 24 12:27:15.822: INFO: Verifying statefulset ss doesn't scale past 0 for another 7.983616254s + Aug 24 12:27:16.828: INFO: Verifying statefulset ss doesn't scale past 0 for another 6.975798193s + Aug 24 12:27:17.836: INFO: Verifying statefulset ss doesn't scale past 0 for another 5.969662968s + Aug 24 12:27:18.842: INFO: Verifying statefulset ss doesn't scale past 0 for another 4.961710368s + Aug 24 12:27:19.849: INFO: Verifying statefulset ss doesn't scale past 0 for another 3.955257273s + Aug 24 12:27:20.856: INFO: Verifying statefulset ss doesn't scale past 0 for another 2.94885905s + Aug 24 12:27:21.862: INFO: Verifying statefulset ss doesn't scale past 0 for another 1.941384868s + Aug 24 12:27:22.870: INFO: Verifying statefulset ss doesn't scale past 0 for another 935.945647ms + STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-2689 08/24/23 12:27:23.87 + Aug 24 12:27:23.878: INFO: Scaling statefulset ss to 0 + Aug 24 12:27:23.898: INFO: Waiting for statefulset status.replicas updated to 0 [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:124 - Jul 29 16:20:27.489: INFO: Deleting all statefulset in ns statefulset-405 - Jul 29 16:20:27.496: INFO: Scaling statefulset ss to 0 - Jul 29 16:20:27.529: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 16:20:27.538: INFO: Deleting statefulset ss + Aug 24 12:27:23.904: INFO: Deleting all statefulset in ns statefulset-2689 + Aug 24 12:27:23.909: INFO: Scaling statefulset ss to 0 + Aug 24 12:27:23.928: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 12:27:23.932: INFO: Deleting statefulset ss [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 - Jul 29 16:20:27.570: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:27:23.958: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 - STEP: Destroying namespace "statefulset-405" for this suite. 07/29/23 16:20:27.583 + STEP: Destroying namespace "statefulset-2689" for this suite. 
08/24/23 12:27:23.97 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSS ------------------------------ -[sig-node] Pods - should be updated [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:344 -[BeforeEach] [sig-node] Pods +[sig-network] Services + should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] + test/e2e/network/service.go:2191 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:20:27.606 -Jul 29 16:20:27.606: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 16:20:27.613 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:27.647 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:27.652 -[BeforeEach] [sig-node] Pods +STEP: Creating a kubernetes client 08/24/23 12:27:23.995 +Aug 24 12:27:23.995: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 12:27:23.998 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:24.03 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:24.036 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should be updated [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:344 -STEP: creating the pod 07/29/23 16:20:27.658 -STEP: submitting the pod to kubernetes 07/29/23 16:20:27.659 -Jul 29 16:20:27.675: INFO: Waiting up to 5m0s for pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" in namespace "pods-5875" to be "running and ready" -Jul 29 16:20:27.684: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d": Phase="Pending", Reason="", readiness=false. Elapsed: 8.708587ms -Jul 29 16:20:27.684: INFO: The phase of Pod pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:20:29.694: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d": Phase="Running", Reason="", readiness=true. Elapsed: 2.01834146s -Jul 29 16:20:29.694: INFO: The phase of Pod pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d is Running (Ready = true) -Jul 29 16:20:29.694: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" satisfied condition "running and ready" -STEP: verifying the pod is in kubernetes 07/29/23 16:20:29.701 -STEP: updating the pod 07/29/23 16:20:29.707 -Jul 29 16:20:30.228: INFO: Successfully updated pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" -Jul 29 16:20:30.228: INFO: Waiting up to 5m0s for pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" in namespace "pods-5875" to be "running" -Jul 29 16:20:30.233: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.847766ms -Jul 29 16:20:30.233: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" satisfied condition "running" -STEP: verifying the updated pod is in kubernetes 07/29/23 16:20:30.233 -Jul 29 16:20:30.255: INFO: Pod update OK -[AfterEach] [sig-node] Pods +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] + test/e2e/network/service.go:2191 +STEP: creating service in namespace services-7870 08/24/23 12:27:24.04 +STEP: creating service affinity-clusterip in namespace services-7870 08/24/23 12:27:24.041 +STEP: creating replication controller affinity-clusterip in namespace services-7870 08/24/23 12:27:24.069 +I0824 12:27:24.101778 14 runners.go:193] Created replication controller with name: affinity-clusterip, namespace: services-7870, replica count: 3 +I0824 12:27:27.155872 14 runners.go:193] affinity-clusterip Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Aug 24 12:27:27.174: INFO: Creating new exec pod +Aug 24 12:27:27.197: INFO: Waiting up to 5m0s for pod "execpod-affinity9xqkr" in namespace "services-7870" to be "running" +Aug 24 12:27:27.207: INFO: Pod "execpod-affinity9xqkr": Phase="Pending", Reason="", readiness=false. Elapsed: 10.305999ms +Aug 24 12:27:29.215: INFO: Pod "execpod-affinity9xqkr": Phase="Running", Reason="", readiness=true. Elapsed: 2.017902207s +Aug 24 12:27:29.215: INFO: Pod "execpod-affinity9xqkr" satisfied condition "running" +Aug 24 12:27:30.216: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7870 exec execpod-affinity9xqkr -- /bin/sh -x -c nc -v -z -w 2 affinity-clusterip 80' +Aug 24 12:27:30.567: INFO: stderr: "+ nc -v -z -w 2 affinity-clusterip 80\nConnection to affinity-clusterip 80 port [tcp/http] succeeded!\n" +Aug 24 12:27:30.567: INFO: stdout: "" +Aug 24 12:27:30.569: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7870 exec execpod-affinity9xqkr -- /bin/sh -x -c nc -v -z -w 2 10.233.43.126 80' +Aug 24 12:27:30.839: INFO: stderr: "+ nc -v -z -w 2 10.233.43.126 80\nConnection to 10.233.43.126 80 port [tcp/http] succeeded!\n" +Aug 24 12:27:30.839: INFO: stdout: "" +Aug 24 12:27:30.839: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7870 exec execpod-affinity9xqkr -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.43.126:80/ ; done' +Aug 24 12:27:31.341: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s 
--connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n" +Aug 24 12:27:31.341: INFO: stdout: "\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv" +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv +Aug 24 12:27:31.341: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-clusterip in namespace services-7870, will wait for the garbage collector to delete the pods 08/24/23 12:27:31.36 +Aug 24 12:27:31.446: INFO: Deleting ReplicationController affinity-clusterip took: 18.530844ms +Aug 24 12:27:31.547: INFO: Terminating ReplicationController affinity-clusterip pods took: 101.200372ms +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 16:20:30.257: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods +Aug 24 12:27:33.604: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "pods-5875" for this suite. 07/29/23 16:20:30.267 +STEP: Destroying namespace "services-7870" for this suite. 
08/24/23 12:27:33.616 ------------------------------ -• [2.676 seconds] -[sig-node] Pods -test/e2e/common/node/framework.go:23 - should be updated [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:344 +• [SLOW TEST] [9.638 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] + test/e2e/network/service.go:2191 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-network] Services + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:27:23.995 + Aug 24 12:27:23.995: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 12:27:23.998 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:24.03 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:24.036 + [BeforeEach] [sig-network] Services + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] + test/e2e/network/service.go:2191 + STEP: creating service in namespace services-7870 08/24/23 12:27:24.04 + STEP: creating service affinity-clusterip in namespace services-7870 08/24/23 12:27:24.041 + STEP: creating replication controller affinity-clusterip in namespace services-7870 08/24/23 12:27:24.069 + I0824 12:27:24.101778 14 runners.go:193] Created replication controller with name: affinity-clusterip, namespace: services-7870, replica count: 3 + I0824 12:27:27.155872 14 runners.go:193] affinity-clusterip Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + Aug 24 12:27:27.174: INFO: Creating new exec pod + Aug 24 12:27:27.197: INFO: Waiting up to 5m0s for pod "execpod-affinity9xqkr" in namespace "services-7870" to be "running" + Aug 24 12:27:27.207: INFO: Pod "execpod-affinity9xqkr": Phase="Pending", Reason="", readiness=false. Elapsed: 10.305999ms + Aug 24 12:27:29.215: INFO: Pod "execpod-affinity9xqkr": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.017902207s + Aug 24 12:27:29.215: INFO: Pod "execpod-affinity9xqkr" satisfied condition "running" + Aug 24 12:27:30.216: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7870 exec execpod-affinity9xqkr -- /bin/sh -x -c nc -v -z -w 2 affinity-clusterip 80' + Aug 24 12:27:30.567: INFO: stderr: "+ nc -v -z -w 2 affinity-clusterip 80\nConnection to affinity-clusterip 80 port [tcp/http] succeeded!\n" + Aug 24 12:27:30.567: INFO: stdout: "" + Aug 24 12:27:30.569: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7870 exec execpod-affinity9xqkr -- /bin/sh -x -c nc -v -z -w 2 10.233.43.126 80' + Aug 24 12:27:30.839: INFO: stderr: "+ nc -v -z -w 2 10.233.43.126 80\nConnection to 10.233.43.126 80 port [tcp/http] succeeded!\n" + Aug 24 12:27:30.839: INFO: stdout: "" + Aug 24 12:27:30.839: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7870 exec execpod-affinity9xqkr -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.43.126:80/ ; done' + Aug 24 12:27:31.341: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.43.126:80/\n" + Aug 24 12:27:31.341: INFO: stdout: "\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv\naffinity-clusterip-k94xv" + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + 
Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Received response from host: affinity-clusterip-k94xv + Aug 24 12:27:31.341: INFO: Cleaning up the exec pod + STEP: deleting ReplicationController affinity-clusterip in namespace services-7870, will wait for the garbage collector to delete the pods 08/24/23 12:27:31.36 + Aug 24 12:27:31.446: INFO: Deleting ReplicationController affinity-clusterip took: 18.530844ms + Aug 24 12:27:31.547: INFO: Terminating ReplicationController affinity-clusterip pods took: 101.200372ms + [AfterEach] [sig-network] Services + test/e2e/framework/node/init/init.go:32 + Aug 24 12:27:33.604: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-network] Services + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-network] Services + tear down framework | framework.go:193 + STEP: Destroying namespace "services-7870" for this suite. 08/24/23 12:27:33.616 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl logs + should be able to retrieve and filter logs [Conformance] + test/e2e/kubectl/kubectl.go:1592 +[BeforeEach] [sig-cli] Kubectl client + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:27:33.643 +Aug 24 12:27:33.643: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:27:33.645 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:33.676 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:33.687 +[BeforeEach] [sig-cli] Kubectl client + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[BeforeEach] Kubectl logs + test/e2e/kubectl/kubectl.go:1572 +STEP: creating an pod 08/24/23 12:27:33.695 +Aug 24 12:27:33.696: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 run logs-generator --image=registry.k8s.io/e2e-test-images/agnhost:2.43 --restart=Never --pod-running-timeout=2m0s -- logs-generator --log-lines-total 100 --run-duration 20s' +Aug 24 12:27:33.853: INFO: stderr: "" +Aug 24 12:27:33.853: INFO: stdout: "pod/logs-generator created\n" +[It] should be able to retrieve and filter logs [Conformance] + test/e2e/kubectl/kubectl.go:1592 +STEP: Waiting for log generator to start. 08/24/23 12:27:33.854 +Aug 24 12:27:33.854: INFO: Waiting up to 5m0s for 1 pods to be running and ready, or succeeded: [logs-generator] +Aug 24 12:27:33.854: INFO: Waiting up to 5m0s for pod "logs-generator" in namespace "kubectl-9675" to be "running and ready, or succeeded" +Aug 24 12:27:33.861: INFO: Pod "logs-generator": Phase="Pending", Reason="", readiness=false. 
Elapsed: 6.967357ms +Aug 24 12:27:33.862: INFO: Error evaluating pod condition running and ready, or succeeded: want pod 'logs-generator' on 'pe9deep4seen-3' to be 'Running' but was 'Pending' +Aug 24 12:27:35.870: INFO: Pod "logs-generator": Phase="Running", Reason="", readiness=true. Elapsed: 2.015848454s +Aug 24 12:27:35.870: INFO: Pod "logs-generator" satisfied condition "running and ready, or succeeded" +Aug 24 12:27:35.870: INFO: Wanted all 1 pods to be running and ready, or succeeded. Result: true. Pods: [logs-generator] +STEP: checking for a matching strings 08/24/23 12:27:35.87 +Aug 24 12:27:35.871: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator' +Aug 24 12:27:36.017: INFO: stderr: "" +Aug 24 12:27:36.017: INFO: stdout: "I0824 12:27:34.930978 1 logs_generator.go:76] 0 PUT /api/v1/namespaces/ns/pods/znv4 210\nI0824 12:27:35.131199 1 logs_generator.go:76] 1 PUT /api/v1/namespaces/kube-system/pods/8sbt 544\nI0824 12:27:35.331694 1 logs_generator.go:76] 2 PUT /api/v1/namespaces/default/pods/svw 480\nI0824 12:27:35.531185 1 logs_generator.go:76] 3 PUT /api/v1/namespaces/default/pods/bgxt 257\nI0824 12:27:35.731614 1 logs_generator.go:76] 4 GET /api/v1/namespaces/default/pods/zj8k 229\nI0824 12:27:35.932177 1 logs_generator.go:76] 5 POST /api/v1/namespaces/default/pods/lzpd 541\n" +STEP: limiting log lines 08/24/23 12:27:36.017 +Aug 24 12:27:36.018: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --tail=1' +Aug 24 12:27:36.172: INFO: stderr: "" +Aug 24 12:27:36.172: INFO: stdout: "I0824 12:27:36.131698 1 logs_generator.go:76] 6 POST /api/v1/namespaces/default/pods/5lr 231\n" +Aug 24 12:27:36.172: INFO: got output "I0824 12:27:36.131698 1 logs_generator.go:76] 6 POST /api/v1/namespaces/default/pods/5lr 231\n" +STEP: limiting log bytes 08/24/23 12:27:36.172 +Aug 24 12:27:36.172: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --limit-bytes=1' +Aug 24 12:27:36.331: INFO: stderr: "" +Aug 24 12:27:36.331: INFO: stdout: "I" +Aug 24 12:27:36.331: INFO: got output "I" +STEP: exposing timestamps 08/24/23 12:27:36.331 +Aug 24 12:27:36.332: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --tail=1 --timestamps' +Aug 24 12:27:36.499: INFO: stderr: "" +Aug 24 12:27:36.499: INFO: stdout: "2023-08-24T12:27:36.331014638Z I0824 12:27:36.330868 1 logs_generator.go:76] 7 PUT /api/v1/namespaces/default/pods/pcl 583\n" +Aug 24 12:27:36.499: INFO: got output "2023-08-24T12:27:36.331014638Z I0824 12:27:36.330868 1 logs_generator.go:76] 7 PUT /api/v1/namespaces/default/pods/pcl 583\n" +STEP: restricting to a time range 08/24/23 12:27:36.499 +Aug 24 12:27:39.000: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --since=1s' +Aug 24 12:27:39.171: INFO: stderr: "" +Aug 24 12:27:39.171: INFO: stdout: "I0824 12:27:38.331636 1 logs_generator.go:76] 17 GET /api/v1/namespaces/default/pods/4pc 348\nI0824 12:27:38.530935 1 logs_generator.go:76] 18 POST /api/v1/namespaces/default/pods/6cn2 210\nI0824 12:27:38.731376 1 logs_generator.go:76] 19 PUT /api/v1/namespaces/default/pods/p57k 514\nI0824 12:27:38.931626 1 logs_generator.go:76] 20 POST /api/v1/namespaces/kube-system/pods/5t4 
448\nI0824 12:27:39.131113 1 logs_generator.go:76] 21 POST /api/v1/namespaces/kube-system/pods/jp7 327\n" +Aug 24 12:27:39.172: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --since=24h' +Aug 24 12:27:39.357: INFO: stderr: "" +Aug 24 12:27:39.357: INFO: stdout: "I0824 12:27:34.930978 1 logs_generator.go:76] 0 PUT /api/v1/namespaces/ns/pods/znv4 210\nI0824 12:27:35.131199 1 logs_generator.go:76] 1 PUT /api/v1/namespaces/kube-system/pods/8sbt 544\nI0824 12:27:35.331694 1 logs_generator.go:76] 2 PUT /api/v1/namespaces/default/pods/svw 480\nI0824 12:27:35.531185 1 logs_generator.go:76] 3 PUT /api/v1/namespaces/default/pods/bgxt 257\nI0824 12:27:35.731614 1 logs_generator.go:76] 4 GET /api/v1/namespaces/default/pods/zj8k 229\nI0824 12:27:35.932177 1 logs_generator.go:76] 5 POST /api/v1/namespaces/default/pods/lzpd 541\nI0824 12:27:36.131698 1 logs_generator.go:76] 6 POST /api/v1/namespaces/default/pods/5lr 231\nI0824 12:27:36.330868 1 logs_generator.go:76] 7 PUT /api/v1/namespaces/default/pods/pcl 583\nI0824 12:27:36.531347 1 logs_generator.go:76] 8 POST /api/v1/namespaces/ns/pods/nl7k 500\nI0824 12:27:36.730834 1 logs_generator.go:76] 9 POST /api/v1/namespaces/default/pods/pqc 446\nI0824 12:27:36.931352 1 logs_generator.go:76] 10 POST /api/v1/namespaces/kube-system/pods/k8tf 366\nI0824 12:27:37.130756 1 logs_generator.go:76] 11 PUT /api/v1/namespaces/ns/pods/hs5 405\nI0824 12:27:37.331332 1 logs_generator.go:76] 12 PUT /api/v1/namespaces/kube-system/pods/2th 310\nI0824 12:27:37.531642 1 logs_generator.go:76] 13 GET /api/v1/namespaces/default/pods/xw8 209\nI0824 12:27:37.731132 1 logs_generator.go:76] 14 GET /api/v1/namespaces/default/pods/s62 555\nI0824 12:27:37.931919 1 logs_generator.go:76] 15 PUT /api/v1/namespaces/default/pods/xbsm 523\nI0824 12:27:38.131114 1 logs_generator.go:76] 16 GET /api/v1/namespaces/ns/pods/wb4 412\nI0824 12:27:38.331636 1 logs_generator.go:76] 17 GET /api/v1/namespaces/default/pods/4pc 348\nI0824 12:27:38.530935 1 logs_generator.go:76] 18 POST /api/v1/namespaces/default/pods/6cn2 210\nI0824 12:27:38.731376 1 logs_generator.go:76] 19 PUT /api/v1/namespaces/default/pods/p57k 514\nI0824 12:27:38.931626 1 logs_generator.go:76] 20 POST /api/v1/namespaces/kube-system/pods/5t4 448\nI0824 12:27:39.131113 1 logs_generator.go:76] 21 POST /api/v1/namespaces/kube-system/pods/jp7 327\nI0824 12:27:39.331322 1 logs_generator.go:76] 22 GET /api/v1/namespaces/default/pods/mcjq 305\n" +[AfterEach] Kubectl logs + test/e2e/kubectl/kubectl.go:1577 +Aug 24 12:27:39.358: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 delete pod logs-generator' +Aug 24 12:27:40.273: INFO: stderr: "" +Aug 24 12:27:40.273: INFO: stdout: "pod \"logs-generator\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + test/e2e/framework/node/init/init.go:32 +Aug 24 12:27:40.273: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-cli] Kubectl client + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-cli] Kubectl client + tear down framework | framework.go:193 +STEP: Destroying namespace "kubectl-9675" for this suite. 
08/24/23 12:27:40.287 +------------------------------ +• [SLOW TEST] [6.658 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Kubectl logs + test/e2e/kubectl/kubectl.go:1569 + should be able to retrieve and filter logs [Conformance] + test/e2e/kubectl/kubectl.go:1592 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-cli] Kubectl client + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:27:33.643 + Aug 24 12:27:33.643: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:27:33.645 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:33.676 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:33.687 + [BeforeEach] [sig-cli] Kubectl client + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [BeforeEach] Kubectl logs + test/e2e/kubectl/kubectl.go:1572 + STEP: creating an pod 08/24/23 12:27:33.695 + Aug 24 12:27:33.696: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 run logs-generator --image=registry.k8s.io/e2e-test-images/agnhost:2.43 --restart=Never --pod-running-timeout=2m0s -- logs-generator --log-lines-total 100 --run-duration 20s' + Aug 24 12:27:33.853: INFO: stderr: "" + Aug 24 12:27:33.853: INFO: stdout: "pod/logs-generator created\n" + [It] should be able to retrieve and filter logs [Conformance] + test/e2e/kubectl/kubectl.go:1592 + STEP: Waiting for log generator to start. 08/24/23 12:27:33.854 + Aug 24 12:27:33.854: INFO: Waiting up to 5m0s for 1 pods to be running and ready, or succeeded: [logs-generator] + Aug 24 12:27:33.854: INFO: Waiting up to 5m0s for pod "logs-generator" in namespace "kubectl-9675" to be "running and ready, or succeeded" + Aug 24 12:27:33.861: INFO: Pod "logs-generator": Phase="Pending", Reason="", readiness=false. Elapsed: 6.967357ms + Aug 24 12:27:33.862: INFO: Error evaluating pod condition running and ready, or succeeded: want pod 'logs-generator' on 'pe9deep4seen-3' to be 'Running' but was 'Pending' + Aug 24 12:27:35.870: INFO: Pod "logs-generator": Phase="Running", Reason="", readiness=true. Elapsed: 2.015848454s + Aug 24 12:27:35.870: INFO: Pod "logs-generator" satisfied condition "running and ready, or succeeded" + Aug 24 12:27:35.870: INFO: Wanted all 1 pods to be running and ready, or succeeded. Result: true. 
Pods: [logs-generator] + STEP: checking for a matching strings 08/24/23 12:27:35.87 + Aug 24 12:27:35.871: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator' + Aug 24 12:27:36.017: INFO: stderr: "" + Aug 24 12:27:36.017: INFO: stdout: "I0824 12:27:34.930978 1 logs_generator.go:76] 0 PUT /api/v1/namespaces/ns/pods/znv4 210\nI0824 12:27:35.131199 1 logs_generator.go:76] 1 PUT /api/v1/namespaces/kube-system/pods/8sbt 544\nI0824 12:27:35.331694 1 logs_generator.go:76] 2 PUT /api/v1/namespaces/default/pods/svw 480\nI0824 12:27:35.531185 1 logs_generator.go:76] 3 PUT /api/v1/namespaces/default/pods/bgxt 257\nI0824 12:27:35.731614 1 logs_generator.go:76] 4 GET /api/v1/namespaces/default/pods/zj8k 229\nI0824 12:27:35.932177 1 logs_generator.go:76] 5 POST /api/v1/namespaces/default/pods/lzpd 541\n" + STEP: limiting log lines 08/24/23 12:27:36.017 + Aug 24 12:27:36.018: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --tail=1' + Aug 24 12:27:36.172: INFO: stderr: "" + Aug 24 12:27:36.172: INFO: stdout: "I0824 12:27:36.131698 1 logs_generator.go:76] 6 POST /api/v1/namespaces/default/pods/5lr 231\n" + Aug 24 12:27:36.172: INFO: got output "I0824 12:27:36.131698 1 logs_generator.go:76] 6 POST /api/v1/namespaces/default/pods/5lr 231\n" + STEP: limiting log bytes 08/24/23 12:27:36.172 + Aug 24 12:27:36.172: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --limit-bytes=1' + Aug 24 12:27:36.331: INFO: stderr: "" + Aug 24 12:27:36.331: INFO: stdout: "I" + Aug 24 12:27:36.331: INFO: got output "I" + STEP: exposing timestamps 08/24/23 12:27:36.331 + Aug 24 12:27:36.332: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --tail=1 --timestamps' + Aug 24 12:27:36.499: INFO: stderr: "" + Aug 24 12:27:36.499: INFO: stdout: "2023-08-24T12:27:36.331014638Z I0824 12:27:36.330868 1 logs_generator.go:76] 7 PUT /api/v1/namespaces/default/pods/pcl 583\n" + Aug 24 12:27:36.499: INFO: got output "2023-08-24T12:27:36.331014638Z I0824 12:27:36.330868 1 logs_generator.go:76] 7 PUT /api/v1/namespaces/default/pods/pcl 583\n" + STEP: restricting to a time range 08/24/23 12:27:36.499 + Aug 24 12:27:39.000: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --since=1s' + Aug 24 12:27:39.171: INFO: stderr: "" + Aug 24 12:27:39.171: INFO: stdout: "I0824 12:27:38.331636 1 logs_generator.go:76] 17 GET /api/v1/namespaces/default/pods/4pc 348\nI0824 12:27:38.530935 1 logs_generator.go:76] 18 POST /api/v1/namespaces/default/pods/6cn2 210\nI0824 12:27:38.731376 1 logs_generator.go:76] 19 PUT /api/v1/namespaces/default/pods/p57k 514\nI0824 12:27:38.931626 1 logs_generator.go:76] 20 POST /api/v1/namespaces/kube-system/pods/5t4 448\nI0824 12:27:39.131113 1 logs_generator.go:76] 21 POST /api/v1/namespaces/kube-system/pods/jp7 327\n" + Aug 24 12:27:39.172: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 logs logs-generator logs-generator --since=24h' + Aug 24 12:27:39.357: INFO: stderr: "" + Aug 24 12:27:39.357: INFO: stdout: "I0824 12:27:34.930978 1 logs_generator.go:76] 0 PUT /api/v1/namespaces/ns/pods/znv4 210\nI0824 12:27:35.131199 1 
logs_generator.go:76] 1 PUT /api/v1/namespaces/kube-system/pods/8sbt 544\nI0824 12:27:35.331694 1 logs_generator.go:76] 2 PUT /api/v1/namespaces/default/pods/svw 480\nI0824 12:27:35.531185 1 logs_generator.go:76] 3 PUT /api/v1/namespaces/default/pods/bgxt 257\nI0824 12:27:35.731614 1 logs_generator.go:76] 4 GET /api/v1/namespaces/default/pods/zj8k 229\nI0824 12:27:35.932177 1 logs_generator.go:76] 5 POST /api/v1/namespaces/default/pods/lzpd 541\nI0824 12:27:36.131698 1 logs_generator.go:76] 6 POST /api/v1/namespaces/default/pods/5lr 231\nI0824 12:27:36.330868 1 logs_generator.go:76] 7 PUT /api/v1/namespaces/default/pods/pcl 583\nI0824 12:27:36.531347 1 logs_generator.go:76] 8 POST /api/v1/namespaces/ns/pods/nl7k 500\nI0824 12:27:36.730834 1 logs_generator.go:76] 9 POST /api/v1/namespaces/default/pods/pqc 446\nI0824 12:27:36.931352 1 logs_generator.go:76] 10 POST /api/v1/namespaces/kube-system/pods/k8tf 366\nI0824 12:27:37.130756 1 logs_generator.go:76] 11 PUT /api/v1/namespaces/ns/pods/hs5 405\nI0824 12:27:37.331332 1 logs_generator.go:76] 12 PUT /api/v1/namespaces/kube-system/pods/2th 310\nI0824 12:27:37.531642 1 logs_generator.go:76] 13 GET /api/v1/namespaces/default/pods/xw8 209\nI0824 12:27:37.731132 1 logs_generator.go:76] 14 GET /api/v1/namespaces/default/pods/s62 555\nI0824 12:27:37.931919 1 logs_generator.go:76] 15 PUT /api/v1/namespaces/default/pods/xbsm 523\nI0824 12:27:38.131114 1 logs_generator.go:76] 16 GET /api/v1/namespaces/ns/pods/wb4 412\nI0824 12:27:38.331636 1 logs_generator.go:76] 17 GET /api/v1/namespaces/default/pods/4pc 348\nI0824 12:27:38.530935 1 logs_generator.go:76] 18 POST /api/v1/namespaces/default/pods/6cn2 210\nI0824 12:27:38.731376 1 logs_generator.go:76] 19 PUT /api/v1/namespaces/default/pods/p57k 514\nI0824 12:27:38.931626 1 logs_generator.go:76] 20 POST /api/v1/namespaces/kube-system/pods/5t4 448\nI0824 12:27:39.131113 1 logs_generator.go:76] 21 POST /api/v1/namespaces/kube-system/pods/jp7 327\nI0824 12:27:39.331322 1 logs_generator.go:76] 22 GET /api/v1/namespaces/default/pods/mcjq 305\n" + [AfterEach] Kubectl logs + test/e2e/kubectl/kubectl.go:1577 + Aug 24 12:27:39.358: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9675 delete pod logs-generator' + Aug 24 12:27:40.273: INFO: stderr: "" + Aug 24 12:27:40.273: INFO: stdout: "pod \"logs-generator\" deleted\n" + [AfterEach] [sig-cli] Kubectl client + test/e2e/framework/node/init/init.go:32 + Aug 24 12:27:40.273: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-cli] Kubectl client + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-cli] Kubectl client + tear down framework | framework.go:193 + STEP: Destroying namespace "kubectl-9675" for this suite. 
08/24/23 12:27:40.287 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Deployment + Deployment should have a working scale subresource [Conformance] + test/e2e/apps/deployment.go:150 +[BeforeEach] [sig-apps] Deployment + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:27:40.303 +Aug 24 12:27:40.303: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename deployment 08/24/23 12:27:40.305 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:40.336 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:40.344 +[BeforeEach] [sig-apps] Deployment + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 +[It] Deployment should have a working scale subresource [Conformance] + test/e2e/apps/deployment.go:150 +Aug 24 12:27:40.349: INFO: Creating simple deployment test-new-deployment +Aug 24 12:27:40.385: INFO: deployment "test-new-deployment" doesn't have the required revision set +STEP: getting scale subresource 08/24/23 12:27:42.411 +STEP: updating a scale subresource 08/24/23 12:27:42.416 +STEP: verifying the deployment Spec.Replicas was modified 08/24/23 12:27:42.428 +STEP: Patch a scale subresource 08/24/23 12:27:42.435 +[AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 +Aug 24 12:27:42.464: INFO: Deployment "test-new-deployment": +&Deployment{ObjectMeta:{test-new-deployment deployment-4918 acb71e51-b7e0-4ba6-a924-3a0767ad3607 20995 3 2023-08-24 12:27:40 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 FieldsV1 {"f:spec":{"f:replicas":{}}} scale} {e2e.test Update apps/v1 2023-08-24 12:27:40 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*4,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent 
SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003d79c18 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-08-24 12:27:42 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-new-deployment-7f5969cbc7" has successfully progressed.,LastUpdateTime:2023-08-24 12:27:42 +0000 UTC,LastTransitionTime:2023-08-24 12:27:40 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Aug 24 12:27:42.470: INFO: New ReplicaSet "test-new-deployment-7f5969cbc7" of Deployment "test-new-deployment": +&ReplicaSet{ObjectMeta:{test-new-deployment-7f5969cbc7 deployment-4918 e08390d1-409f-4ad4-8639-40e8d9a6aeea 20994 2 2023-08-24 12:27:40 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-new-deployment acb71e51-b7e0-4ba6-a924-3a0767ad3607 0xc001ed2ba7 0xc001ed2ba8}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"acb71e51-b7e0-4ba6-a924-3a0767ad3607\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7f5969cbc7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent 
SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc001ed2c48 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:27:42.509: INFO: Pod "test-new-deployment-7f5969cbc7-dsmqf" is available: +&Pod{ObjectMeta:{test-new-deployment-7f5969cbc7-dsmqf test-new-deployment-7f5969cbc7- deployment-4918 f40c9770-97f5-4e7f-9011-b2d3ad936ff7 20990 0 2023-08-24 12:27:40 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet test-new-deployment-7f5969cbc7 e08390d1-409f-4ad4-8639-40e8d9a6aeea 0xc00028ea97 0xc00028ea98}] [] [{kube-controller-manager Update v1 2023-08-24 12:27:40 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e08390d1-409f-4ad4-8639-40e8d9a6aeea\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.88\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-rjbvc,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rjbvc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:40 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:40 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.88,StartTime:2023-08-24 12:27:40 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:27:41 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://918fecbcb2c2f6f7ac9698deb7fe0b4cb9558516906641466086e37f16bbd737,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.88,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:27:42.510: INFO: Pod "test-new-deployment-7f5969cbc7-ghvk6" is not available: +&Pod{ObjectMeta:{test-new-deployment-7f5969cbc7-ghvk6 test-new-deployment-7f5969cbc7- deployment-4918 69d73c7f-f2e1-4fa0-8e85-20d46958b5f2 20999 0 2023-08-24 12:27:42 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet test-new-deployment-7f5969cbc7 e08390d1-409f-4ad4-8639-40e8d9a6aeea 0xc00028f287 0xc00028f288}] [] [{kube-controller-manager Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e08390d1-409f-4ad4-8639-40e8d9a6aeea\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-xfbb8,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xfbb8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},Resou
rceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + test/e2e/framework/node/init/init.go:32 +Aug 24 12:27:42.511: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Deployment + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-apps] Deployment + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-apps] Deployment + tear down framework | framework.go:193 +STEP: Destroying namespace "deployment-4918" for this suite. 08/24/23 12:27:42.532 +------------------------------ +• [2.239 seconds] +[sig-apps] Deployment +test/e2e/apps/framework.go:23 + Deployment should have a working scale subresource [Conformance] + test/e2e/apps/deployment.go:150 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-apps] Deployment + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:27:40.303 + Aug 24 12:27:40.303: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename deployment 08/24/23 12:27:40.305 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:40.336 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:40.344 + [BeforeEach] [sig-apps] Deployment + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 + [It] Deployment should have a working scale subresource [Conformance] + test/e2e/apps/deployment.go:150 + Aug 24 12:27:40.349: INFO: Creating simple deployment test-new-deployment + Aug 24 12:27:40.385: INFO: deployment "test-new-deployment" doesn't have the required revision set + STEP: getting scale subresource 08/24/23 12:27:42.411 + STEP: updating a scale subresource 08/24/23 12:27:42.416 + STEP: verifying the deployment Spec.Replicas was modified 08/24/23 12:27:42.428 + STEP: Patch a scale subresource 08/24/23 12:27:42.435 + [AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 + Aug 24 12:27:42.464: INFO: Deployment "test-new-deployment": + &Deployment{ObjectMeta:{test-new-deployment deployment-4918 acb71e51-b7e0-4ba6-a924-3a0767ad3607 20995 3 2023-08-24 12:27:40 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 FieldsV1 {"f:spec":{"f:replicas":{}}} scale} {e2e.test Update apps/v1 2023-08-24 12:27:40 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:27:42 +0000 UTC 
FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*4,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003d79c18 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-08-24 12:27:42 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-new-deployment-7f5969cbc7" has successfully progressed.,LastUpdateTime:2023-08-24 12:27:42 +0000 UTC,LastTransitionTime:2023-08-24 12:27:40 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + + Aug 24 12:27:42.470: INFO: New ReplicaSet "test-new-deployment-7f5969cbc7" of Deployment "test-new-deployment": + &ReplicaSet{ObjectMeta:{test-new-deployment-7f5969cbc7 deployment-4918 e08390d1-409f-4ad4-8639-40e8d9a6aeea 20994 2 2023-08-24 12:27:40 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-new-deployment acb71e51-b7e0-4ba6-a924-3a0767ad3607 0xc001ed2ba7 0xc001ed2ba8}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"acb71e51-b7e0-4ba6-a924-3a0767ad3607\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7f5969cbc7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc001ed2c48 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:27:42.509: INFO: Pod "test-new-deployment-7f5969cbc7-dsmqf" is available: + &Pod{ObjectMeta:{test-new-deployment-7f5969cbc7-dsmqf test-new-deployment-7f5969cbc7- deployment-4918 f40c9770-97f5-4e7f-9011-b2d3ad936ff7 20990 0 2023-08-24 12:27:40 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet test-new-deployment-7f5969cbc7 e08390d1-409f-4ad4-8639-40e8d9a6aeea 0xc00028ea97 0xc00028ea98}] [] [{kube-controller-manager Update v1 2023-08-24 12:27:40 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e08390d1-409f-4ad4-8639-40e8d9a6aeea\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.88\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-rjbvc,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rjbvc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Ke
y:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:40 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:40 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.88,StartTime:2023-08-24 12:27:40 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:27:41 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://918fecbcb2c2f6f7ac9698deb7fe0b4cb9558516906641466086e37f16bbd737,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.88,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:27:42.510: INFO: Pod "test-new-deployment-7f5969cbc7-ghvk6" is not available: + &Pod{ObjectMeta:{test-new-deployment-7f5969cbc7-ghvk6 test-new-deployment-7f5969cbc7- deployment-4918 69d73c7f-f2e1-4fa0-8e85-20d46958b5f2 20999 0 2023-08-24 12:27:42 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet test-new-deployment-7f5969cbc7 e08390d1-409f-4ad4-8639-40e8d9a6aeea 0xc00028f287 0xc00028f288}] [] [{kube-controller-manager Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e08390d1-409f-4ad4-8639-40e8d9a6aeea\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-xfbb8,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xfbb8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},Resou
rceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + [AfterEach] [sig-apps] Deployment + test/e2e/framework/node/init/init.go:32 + Aug 24 12:27:42.511: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Deployment + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-apps] Deployment + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-apps] Deployment + tear down framework | framework.go:193 + STEP: Destroying namespace "deployment-4918" for this suite. 08/24/23 12:27:42.532 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Watchers + should be able to restart watching from the last resource version observed by the previous watch [Conformance] + test/e2e/apimachinery/watch.go:191 +[BeforeEach] [sig-api-machinery] Watchers + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:27:42.558 +Aug 24 12:27:42.559: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename watch 08/24/23 12:27:42.561 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:42.602 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:42.607 +[BeforeEach] [sig-api-machinery] Watchers + test/e2e/framework/metrics/init/init.go:31 +[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance] + test/e2e/apimachinery/watch.go:191 +STEP: creating a watch on configmaps 08/24/23 12:27:42.614 +STEP: creating a new configmap 08/24/23 12:27:42.617 +STEP: modifying the configmap once 08/24/23 12:27:42.629 +STEP: closing the watch once it receives two notifications 08/24/23 12:27:42.644 +Aug 24 12:27:42.645: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-1076 ec9e324a-5585-4a1a-a813-0dc785b93a76 21017 0 2023-08-24 12:27:42 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 12:27:42.645: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-1076 ec9e324a-5585-4a1a-a813-0dc785b93a76 21018 0 2023-08-24 12:27:42 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying the configmap a second time, while the watch is closed 08/24/23 12:27:42.646 +STEP: creating a new watch on configmaps from the last resource version observed by the first watch 08/24/23 12:27:42.669 +STEP: deleting the configmap 08/24/23 12:27:42.672 +STEP: Expecting to observe notifications for all changes to the 
configmap since the first watch closed 08/24/23 12:27:42.684 +Aug 24 12:27:42.685: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-1076 ec9e324a-5585-4a1a-a813-0dc785b93a76 21019 0 2023-08-24 12:27:42 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 12:27:42.685: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-1076 ec9e324a-5585-4a1a-a813-0dc785b93a76 21020 0 2023-08-24 12:27:42 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers + test/e2e/framework/node/init/init.go:32 +Aug 24 12:27:42.686: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Watchers + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-api-machinery] Watchers + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-api-machinery] Watchers + tear down framework | framework.go:193 +STEP: Destroying namespace "watch-1076" for this suite. 08/24/23 12:27:42.693 +------------------------------ +• [0.151 seconds] +[sig-api-machinery] Watchers +test/e2e/apimachinery/framework.go:23 + should be able to restart watching from the last resource version observed by the previous watch [Conformance] + test/e2e/apimachinery/watch.go:191 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods + [BeforeEach] [sig-api-machinery] Watchers set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:20:27.606 - Jul 29 16:20:27.606: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 16:20:27.613 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:27.647 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:27.652 - [BeforeEach] [sig-node] Pods + STEP: Creating a kubernetes client 08/24/23 12:27:42.558 + Aug 24 12:27:42.559: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename watch 08/24/23 12:27:42.561 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:42.602 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:42.607 + [BeforeEach] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should be updated [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:344 - STEP: creating the pod 07/29/23 16:20:27.658 - STEP: submitting the pod to kubernetes 07/29/23 16:20:27.659 - Jul 29 16:20:27.675: INFO: Waiting up to 5m0s for pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" in namespace "pods-5875" to be "running and ready" - Jul 29 16:20:27.684: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.708587ms - Jul 29 16:20:27.684: INFO: The phase of Pod pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:20:29.694: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d": Phase="Running", Reason="", readiness=true. Elapsed: 2.01834146s - Jul 29 16:20:29.694: INFO: The phase of Pod pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d is Running (Ready = true) - Jul 29 16:20:29.694: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" satisfied condition "running and ready" - STEP: verifying the pod is in kubernetes 07/29/23 16:20:29.701 - STEP: updating the pod 07/29/23 16:20:29.707 - Jul 29 16:20:30.228: INFO: Successfully updated pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" - Jul 29 16:20:30.228: INFO: Waiting up to 5m0s for pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" in namespace "pods-5875" to be "running" - Jul 29 16:20:30.233: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d": Phase="Running", Reason="", readiness=true. Elapsed: 4.847766ms - Jul 29 16:20:30.233: INFO: Pod "pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d" satisfied condition "running" - STEP: verifying the updated pod is in kubernetes 07/29/23 16:20:30.233 - Jul 29 16:20:30.255: INFO: Pod update OK - [AfterEach] [sig-node] Pods + [It] should be able to restart watching from the last resource version observed by the previous watch [Conformance] + test/e2e/apimachinery/watch.go:191 + STEP: creating a watch on configmaps 08/24/23 12:27:42.614 + STEP: creating a new configmap 08/24/23 12:27:42.617 + STEP: modifying the configmap once 08/24/23 12:27:42.629 + STEP: closing the watch once it receives two notifications 08/24/23 12:27:42.644 + Aug 24 12:27:42.645: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-1076 ec9e324a-5585-4a1a-a813-0dc785b93a76 21017 0 2023-08-24 12:27:42 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 12:27:42.645: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-1076 ec9e324a-5585-4a1a-a813-0dc785b93a76 21018 0 2023-08-24 12:27:42 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} + STEP: modifying the configmap a second time, while the watch is closed 08/24/23 12:27:42.646 + STEP: creating a new watch on configmaps from the last resource version observed by the first watch 08/24/23 12:27:42.669 + STEP: deleting the configmap 08/24/23 12:27:42.672 + STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed 08/24/23 12:27:42.684 + Aug 24 12:27:42.685: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-1076 ec9e324a-5585-4a1a-a813-0dc785b93a76 21019 0 2023-08-24 12:27:42 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 
2,},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 12:27:42.685: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-1076 ec9e324a-5585-4a1a-a813-0dc785b93a76 21020 0 2023-08-24 12:27:42 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} + [AfterEach] [sig-api-machinery] Watchers test/e2e/framework/node/init/init.go:32 - Jul 29 16:20:30.257: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods + Aug 24 12:27:42.686: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-api-machinery] Watchers dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-api-machinery] Watchers tear down framework | framework.go:193 - STEP: Destroying namespace "pods-5875" for this suite. 07/29/23 16:20:30.267 + STEP: Destroying namespace "watch-1076" for this suite. 08/24/23 12:27:42.693 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPredicates [Serial] - validates that NodeSelector is respected if not matching [Conformance] - test/e2e/scheduling/predicates.go:443 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] +[sig-apps] Deployment + should validate Deployment Status endpoints [Conformance] + test/e2e/apps/deployment.go:479 +[BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:20:30.286 -Jul 29 16:20:30.287: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sched-pred 07/29/23 16:20:30.289 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:30.324 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:30.329 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] +STEP: Creating a kubernetes client 08/24/23 12:27:42.712 +Aug 24 12:27:42.712: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename deployment 08/24/23 12:27:42.714 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:42.741 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:42.746 +[BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] - test/e2e/scheduling/predicates.go:97 -Jul 29 16:20:30.333: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready -Jul 29 16:20:30.351: INFO: Waiting for terminating namespaces to be deleted... 
-Jul 29 16:20:30.357: INFO: -Logging pods the apiserver thinks is on node wetuj3nuajog-1 before test -Jul 29 16:20:30.375: INFO: cilium-cdv47 from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container cilium-agent ready: true, restart count 0 -Jul 29 16:20:30.375: INFO: cilium-node-init-jdrzm from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container node-init ready: true, restart count 0 -Jul 29 16:20:30.375: INFO: coredns-787d4945fb-2xpvx from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container coredns ready: true, restart count 0 -Jul 29 16:20:30.375: INFO: coredns-787d4945fb-clg7z from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container coredns ready: true, restart count 0 -Jul 29 16:20:30.375: INFO: kube-addon-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container kube-addon-manager ready: true, restart count 0 -Jul 29 16:20:30.375: INFO: kube-apiserver-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container kube-apiserver ready: true, restart count 0 -Jul 29 16:20:30.375: INFO: kube-controller-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container kube-controller-manager ready: true, restart count 0 -Jul 29 16:20:30.375: INFO: kube-proxy-zc9m8 from kube-system started at 2023-07-29 15:13:58 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container kube-proxy ready: true, restart count 0 -Jul 29 16:20:30.375: INFO: kube-scheduler-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container kube-scheduler ready: true, restart count 0 -Jul 29 16:20:30.375: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:20:30.375: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:20:30.376: INFO: Container systemd-logs ready: true, restart count 0 -Jul 29 16:20:30.376: INFO: -Logging pods the apiserver thinks is on node wetuj3nuajog-2 before test -Jul 29 16:20:30.392: INFO: cilium-kxphw from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.393: INFO: Container cilium-agent ready: true, restart count 0 -Jul 29 16:20:30.393: INFO: cilium-node-init-fqx5t from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.393: INFO: Container node-init ready: true, restart count 0 -Jul 29 16:20:30.393: INFO: cilium-operator-8c499d9f6-hfgjd from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.393: INFO: Container cilium-operator ready: true, restart count 0 -Jul 29 16:20:30.393: INFO: kube-addon-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.393: INFO: Container kube-addon-manager ready: true, restart count 0 -Jul 29 16:20:30.394: INFO: kube-apiserver-wetuj3nuajog-2 from kube-system started at 
2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.394: INFO: Container kube-apiserver ready: true, restart count 0 -Jul 29 16:20:30.394: INFO: kube-controller-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.394: INFO: Container kube-controller-manager ready: true, restart count 0 -Jul 29 16:20:30.394: INFO: kube-proxy-gzqkk from kube-system started at 2023-07-29 15:14:12 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.394: INFO: Container kube-proxy ready: true, restart count 0 -Jul 29 16:20:30.394: INFO: kube-scheduler-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.394: INFO: Container kube-scheduler ready: true, restart count 0 -Jul 29 16:20:30.394: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:20:30.394: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:20:30.395: INFO: Container systemd-logs ready: true, restart count 0 -Jul 29 16:20:30.395: INFO: -Logging pods the apiserver thinks is on node wetuj3nuajog-3 before test -Jul 29 16:20:30.410: INFO: cilium-node-init-9ghzk from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.410: INFO: Container node-init ready: true, restart count 0 -Jul 29 16:20:30.410: INFO: cilium-v9c5p from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.410: INFO: Container cilium-agent ready: true, restart count 0 -Jul 29 16:20:30.411: INFO: kube-proxy-v77tx from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.411: INFO: Container kube-proxy ready: true, restart count 0 -Jul 29 16:20:30.411: INFO: pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d from pods-5875 started at 2023-07-29 16:20:27 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.411: INFO: Container pause ready: true, restart count 0 -Jul 29 16:20:30.411: INFO: sonobuoy from sonobuoy started at 2023-07-29 15:28:59 +0000 UTC (1 container statuses recorded) -Jul 29 16:20:30.411: INFO: Container kube-sonobuoy ready: true, restart count 0 -Jul 29 16:20:30.411: INFO: sonobuoy-e2e-job-7bf00df102b6496e from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:20:30.411: INFO: Container e2e ready: true, restart count 0 -Jul 29 16:20:30.411: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:20:30.411: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:20:30.411: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:20:30.411: INFO: Container systemd-logs ready: true, restart count 0 -[It] validates that NodeSelector is respected if not matching [Conformance] - test/e2e/scheduling/predicates.go:443 -STEP: Trying to schedule Pod with nonempty NodeSelector. 07/29/23 16:20:30.411 -STEP: Considering event: -Type = [Warning], Name = [restricted-pod.1776630ffd625851], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 node(s) didn't match Pod's node affinity/selector. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling..] 
07/29/23 16:20:30.467 -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] +[BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 +[It] should validate Deployment Status endpoints [Conformance] + test/e2e/apps/deployment.go:479 +STEP: creating a Deployment 08/24/23 12:27:42.772 +Aug 24 12:27:42.772: INFO: Creating simple deployment test-deployment-4skvs +Aug 24 12:27:42.794: INFO: deployment "test-deployment-4skvs" doesn't have the required revision set +STEP: Getting /status 08/24/23 12:27:44.818 +Aug 24 12:27:44.826: INFO: Deployment test-deployment-4skvs has Conditions: [{Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.}] +STEP: updating Deployment Status 08/24/23 12:27:44.826 +Aug 24 12:27:44.848: INFO: updatedStatus.Conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 27, 44, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 27, 44, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 27, 44, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 27, 42, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"test-deployment-4skvs-54bc444df\" has successfully progressed."}, v1.DeploymentCondition{Type:"StatusUpdate", Status:"True", LastUpdateTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the Deployment status to be updated 08/24/23 12:27:44.849 +Aug 24 12:27:44.853: INFO: Observed &Deployment event: ADDED +Aug 24 12:27:44.853: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-4skvs-54bc444df"} +Aug 24 12:27:44.853: INFO: Observed &Deployment event: MODIFIED +Aug 24 12:27:44.853: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-4skvs-54bc444df"} +Aug 24 12:27:44.854: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} +Aug 24 12:27:44.854: INFO: Observed &Deployment event: MODIFIED +Aug 24 12:27:44.854: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} +Aug 24 12:27:44.854: INFO: Observed Deployment 
test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-4skvs-54bc444df" is progressing.} +Aug 24 12:27:44.855: INFO: Observed &Deployment event: MODIFIED +Aug 24 12:27:44.855: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} +Aug 24 12:27:44.855: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.} +Aug 24 12:27:44.855: INFO: Observed &Deployment event: MODIFIED +Aug 24 12:27:44.856: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} +Aug 24 12:27:44.856: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.} +Aug 24 12:27:44.856: INFO: Found Deployment test-deployment-4skvs in namespace deployment-7218 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} +Aug 24 12:27:44.856: INFO: Deployment test-deployment-4skvs has an updated status +STEP: patching the Statefulset Status 08/24/23 12:27:44.856 +Aug 24 12:27:44.856: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} +Aug 24 12:27:44.869: INFO: Patched status conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"StatusPatched", Status:"True", LastUpdateTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}} +STEP: watching for the Deployment status to be patched 08/24/23 12:27:44.869 +Aug 24 12:27:44.873: INFO: Observed &Deployment event: ADDED +Aug 24 12:27:44.874: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-4skvs-54bc444df"} +Aug 24 12:27:44.874: INFO: Observed &Deployment event: MODIFIED +Aug 24 12:27:44.874: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-4skvs-54bc444df"} +Aug 24 12:27:44.874: INFO: Observed deployment test-deployment-4skvs in namespace 
deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} +Aug 24 12:27:44.874: INFO: Observed &Deployment event: MODIFIED +Aug 24 12:27:44.875: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} +Aug 24 12:27:44.875: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-4skvs-54bc444df" is progressing.} +Aug 24 12:27:44.875: INFO: Observed &Deployment event: MODIFIED +Aug 24 12:27:44.875: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} +Aug 24 12:27:44.876: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.} +Aug 24 12:27:44.876: INFO: Observed &Deployment event: MODIFIED +Aug 24 12:27:44.876: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} +Aug 24 12:27:44.876: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.} +Aug 24 12:27:44.876: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} +Aug 24 12:27:44.877: INFO: Observed &Deployment event: MODIFIED +Aug 24 12:27:44.877: INFO: Found deployment test-deployment-4skvs in namespace deployment-7218 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC } +Aug 24 12:27:44.877: INFO: Deployment test-deployment-4skvs has a patched status +[AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 +Aug 24 12:27:44.889: INFO: Deployment "test-deployment-4skvs": +&Deployment{ObjectMeta:{test-deployment-4skvs deployment-7218 f6ba29ac-5823-408c-bc41-89372e6452e5 21053 1 2023-08-24 12:27:42 +0000 UTC map[e2e:testing name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {e2e.test Update apps/v1 2023-08-24 12:27:44 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"StatusPatched\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update apps/v1 2023-08-24 12:27:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0050a73d8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:StatusPatched,Status:True,Reason:,Message:,LastUpdateTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:0001-01-01 00:00:00 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:FoundNewReplicaSet,Message:Found new replica set "test-deployment-4skvs-54bc444df",LastUpdateTime:2023-08-24 12:27:44 +0000 UTC,LastTransitionTime:2023-08-24 12:27:44 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Aug 24 12:27:44.896: INFO: New ReplicaSet "test-deployment-4skvs-54bc444df" of Deployment "test-deployment-4skvs": +&ReplicaSet{ObjectMeta:{test-deployment-4skvs-54bc444df deployment-7218 d849b5e1-12cb-486c-876c-146e16b80a09 21049 1 2023-08-24 12:27:42 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 
deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment-4skvs f6ba29ac-5823-408c-bc41-89372e6452e5 0xc004b5e880 0xc004b5e881}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"f6ba29ac-5823-408c-bc41-89372e6452e5\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:27:44 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,pod-template-hash: 54bc444df,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004b5e928 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:27:44.907: INFO: Pod "test-deployment-4skvs-54bc444df-hlclx" is available: +&Pod{ObjectMeta:{test-deployment-4skvs-54bc444df-hlclx test-deployment-4skvs-54bc444df- deployment-7218 50c5f5b0-a375-420d-a63c-9fcdee59d57e 21048 0 2023-08-24 12:27:42 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[] [{apps/v1 ReplicaSet test-deployment-4skvs-54bc444df d849b5e1-12cb-486c-876c-146e16b80a09 0xc004b5ecf0 0xc004b5ecf1}] [] [{kube-controller-manager Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"d849b5e1-12cb-486c-876c-146e16b80a09\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 
2023-08-24 12:27:44 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.19\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-bblgs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bblgs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute
,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.19,StartTime:2023-08-24 12:27:42 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:27:43 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://4f94a4347c3752ae3e32f6fbeea00b396ca95bd92de842b23b5800e386586382,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.19,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 -Jul 29 16:20:31.463: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] - test/e2e/scheduling/predicates.go:88 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] +Aug 24 12:27:44.909: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] +[DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] +[DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 -STEP: Destroying namespace "sched-pred-4740" for this suite. 07/29/23 16:20:31.473 +STEP: Destroying namespace "deployment-7218" for this suite. 
08/24/23 12:27:44.918 ------------------------------ -• [1.198 seconds] -[sig-scheduling] SchedulerPredicates [Serial] -test/e2e/scheduling/framework.go:40 - validates that NodeSelector is respected if not matching [Conformance] - test/e2e/scheduling/predicates.go:443 +• [2.218 seconds] +[sig-apps] Deployment +test/e2e/apps/framework.go:23 + should validate Deployment Status endpoints [Conformance] + test/e2e/apps/deployment.go:479 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + [BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:20:30.286 - Jul 29 16:20:30.287: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sched-pred 07/29/23 16:20:30.289 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:30.324 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:30.329 - [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + STEP: Creating a kubernetes client 08/24/23 12:27:42.712 + Aug 24 12:27:42.712: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename deployment 08/24/23 12:27:42.714 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:42.741 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:42.746 + [BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] - test/e2e/scheduling/predicates.go:97 - Jul 29 16:20:30.333: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready - Jul 29 16:20:30.351: INFO: Waiting for terminating namespaces to be deleted... 
- Jul 29 16:20:30.357: INFO: - Logging pods the apiserver thinks is on node wetuj3nuajog-1 before test - Jul 29 16:20:30.375: INFO: cilium-cdv47 from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container cilium-agent ready: true, restart count 0 - Jul 29 16:20:30.375: INFO: cilium-node-init-jdrzm from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container node-init ready: true, restart count 0 - Jul 29 16:20:30.375: INFO: coredns-787d4945fb-2xpvx from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container coredns ready: true, restart count 0 - Jul 29 16:20:30.375: INFO: coredns-787d4945fb-clg7z from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container coredns ready: true, restart count 0 - Jul 29 16:20:30.375: INFO: kube-addon-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container kube-addon-manager ready: true, restart count 0 - Jul 29 16:20:30.375: INFO: kube-apiserver-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container kube-apiserver ready: true, restart count 0 - Jul 29 16:20:30.375: INFO: kube-controller-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container kube-controller-manager ready: true, restart count 0 - Jul 29 16:20:30.375: INFO: kube-proxy-zc9m8 from kube-system started at 2023-07-29 15:13:58 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container kube-proxy ready: true, restart count 0 - Jul 29 16:20:30.375: INFO: kube-scheduler-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container kube-scheduler ready: true, restart count 0 - Jul 29 16:20:30.375: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) - Jul 29 16:20:30.375: INFO: Container sonobuoy-worker ready: true, restart count 0 - Jul 29 16:20:30.376: INFO: Container systemd-logs ready: true, restart count 0 - Jul 29 16:20:30.376: INFO: - Logging pods the apiserver thinks is on node wetuj3nuajog-2 before test - Jul 29 16:20:30.392: INFO: cilium-kxphw from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.393: INFO: Container cilium-agent ready: true, restart count 0 - Jul 29 16:20:30.393: INFO: cilium-node-init-fqx5t from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.393: INFO: Container node-init ready: true, restart count 0 - Jul 29 16:20:30.393: INFO: cilium-operator-8c499d9f6-hfgjd from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.393: INFO: Container cilium-operator ready: true, restart count 0 - Jul 29 16:20:30.393: INFO: kube-addon-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.393: INFO: Container kube-addon-manager ready: true, restart count 0 - Jul 29 16:20:30.394: INFO: kube-apiserver-wetuj3nuajog-2 
from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.394: INFO: Container kube-apiserver ready: true, restart count 0 - Jul 29 16:20:30.394: INFO: kube-controller-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.394: INFO: Container kube-controller-manager ready: true, restart count 0 - Jul 29 16:20:30.394: INFO: kube-proxy-gzqkk from kube-system started at 2023-07-29 15:14:12 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.394: INFO: Container kube-proxy ready: true, restart count 0 - Jul 29 16:20:30.394: INFO: kube-scheduler-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.394: INFO: Container kube-scheduler ready: true, restart count 0 - Jul 29 16:20:30.394: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) - Jul 29 16:20:30.394: INFO: Container sonobuoy-worker ready: true, restart count 0 - Jul 29 16:20:30.395: INFO: Container systemd-logs ready: true, restart count 0 - Jul 29 16:20:30.395: INFO: - Logging pods the apiserver thinks is on node wetuj3nuajog-3 before test - Jul 29 16:20:30.410: INFO: cilium-node-init-9ghzk from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.410: INFO: Container node-init ready: true, restart count 0 - Jul 29 16:20:30.410: INFO: cilium-v9c5p from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.410: INFO: Container cilium-agent ready: true, restart count 0 - Jul 29 16:20:30.411: INFO: kube-proxy-v77tx from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.411: INFO: Container kube-proxy ready: true, restart count 0 - Jul 29 16:20:30.411: INFO: pod-update-f9267287-f279-4e21-bce2-a6e495e0ea5d from pods-5875 started at 2023-07-29 16:20:27 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.411: INFO: Container pause ready: true, restart count 0 - Jul 29 16:20:30.411: INFO: sonobuoy from sonobuoy started at 2023-07-29 15:28:59 +0000 UTC (1 container statuses recorded) - Jul 29 16:20:30.411: INFO: Container kube-sonobuoy ready: true, restart count 0 - Jul 29 16:20:30.411: INFO: sonobuoy-e2e-job-7bf00df102b6496e from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) - Jul 29 16:20:30.411: INFO: Container e2e ready: true, restart count 0 - Jul 29 16:20:30.411: INFO: Container sonobuoy-worker ready: true, restart count 0 - Jul 29 16:20:30.411: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) - Jul 29 16:20:30.411: INFO: Container sonobuoy-worker ready: true, restart count 0 - Jul 29 16:20:30.411: INFO: Container systemd-logs ready: true, restart count 0 - [It] validates that NodeSelector is respected if not matching [Conformance] - test/e2e/scheduling/predicates.go:443 - STEP: Trying to schedule Pod with nonempty NodeSelector. 07/29/23 16:20:30.411 - STEP: Considering event: - Type = [Warning], Name = [restricted-pod.1776630ffd625851], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 node(s) didn't match Pod's node affinity/selector. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling..] 
07/29/23 16:20:30.467 - [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + [BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 + [It] should validate Deployment Status endpoints [Conformance] + test/e2e/apps/deployment.go:479 + STEP: creating a Deployment 08/24/23 12:27:42.772 + Aug 24 12:27:42.772: INFO: Creating simple deployment test-deployment-4skvs + Aug 24 12:27:42.794: INFO: deployment "test-deployment-4skvs" doesn't have the required revision set + STEP: Getting /status 08/24/23 12:27:44.818 + Aug 24 12:27:44.826: INFO: Deployment test-deployment-4skvs has Conditions: [{Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.}] + STEP: updating Deployment Status 08/24/23 12:27:44.826 + Aug 24 12:27:44.848: INFO: updatedStatus.Conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 27, 44, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 27, 44, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.August, 24, 12, 27, 44, 0, time.Local), LastTransitionTime:time.Date(2023, time.August, 24, 12, 27, 42, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"test-deployment-4skvs-54bc444df\" has successfully progressed."}, v1.DeploymentCondition{Type:"StatusUpdate", Status:"True", LastUpdateTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} + STEP: watching for the Deployment status to be updated 08/24/23 12:27:44.849 + Aug 24 12:27:44.853: INFO: Observed &Deployment event: ADDED + Aug 24 12:27:44.853: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-4skvs-54bc444df"} + Aug 24 12:27:44.853: INFO: Observed &Deployment event: MODIFIED + Aug 24 12:27:44.853: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-4skvs-54bc444df"} + Aug 24 12:27:44.854: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} + Aug 24 12:27:44.854: INFO: Observed &Deployment event: MODIFIED + Aug 24 12:27:44.854: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} + Aug 24 12:27:44.854: INFO: 
Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-4skvs-54bc444df" is progressing.} + Aug 24 12:27:44.855: INFO: Observed &Deployment event: MODIFIED + Aug 24 12:27:44.855: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} + Aug 24 12:27:44.855: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.} + Aug 24 12:27:44.855: INFO: Observed &Deployment event: MODIFIED + Aug 24 12:27:44.856: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} + Aug 24 12:27:44.856: INFO: Observed Deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.} + Aug 24 12:27:44.856: INFO: Found Deployment test-deployment-4skvs in namespace deployment-7218 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} + Aug 24 12:27:44.856: INFO: Deployment test-deployment-4skvs has an updated status + STEP: patching the Statefulset Status 08/24/23 12:27:44.856 + Aug 24 12:27:44.856: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} + Aug 24 12:27:44.869: INFO: Patched status conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"StatusPatched", Status:"True", LastUpdateTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}} + STEP: watching for the Deployment status to be patched 08/24/23 12:27:44.869 + Aug 24 12:27:44.873: INFO: Observed &Deployment event: ADDED + Aug 24 12:27:44.874: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-4skvs-54bc444df"} + Aug 24 12:27:44.874: INFO: Observed &Deployment event: MODIFIED + Aug 24 12:27:44.874: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-4skvs-54bc444df"} + Aug 24 12:27:44.874: INFO: Observed deployment 
test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} + Aug 24 12:27:44.874: INFO: Observed &Deployment event: MODIFIED + Aug 24 12:27:44.875: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} + Aug 24 12:27:44.875: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:42 +0000 UTC 2023-08-24 12:27:42 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-4skvs-54bc444df" is progressing.} + Aug 24 12:27:44.875: INFO: Observed &Deployment event: MODIFIED + Aug 24 12:27:44.875: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} + Aug 24 12:27:44.876: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.} + Aug 24 12:27:44.876: INFO: Observed &Deployment event: MODIFIED + Aug 24 12:27:44.876: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:44 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} + Aug 24 12:27:44.876: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-08-24 12:27:44 +0000 UTC 2023-08-24 12:27:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-4skvs-54bc444df" has successfully progressed.} + Aug 24 12:27:44.876: INFO: Observed deployment test-deployment-4skvs in namespace deployment-7218 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} + Aug 24 12:27:44.877: INFO: Observed &Deployment event: MODIFIED + Aug 24 12:27:44.877: INFO: Found deployment test-deployment-4skvs in namespace deployment-7218 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC } + Aug 24 12:27:44.877: INFO: Deployment test-deployment-4skvs has a patched status + [AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 + Aug 24 12:27:44.889: INFO: Deployment "test-deployment-4skvs": + &Deployment{ObjectMeta:{test-deployment-4skvs deployment-7218 f6ba29ac-5823-408c-bc41-89372e6452e5 21053 1 2023-08-24 12:27:42 +0000 UTC map[e2e:testing name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 2023-08-24 
12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {e2e.test Update apps/v1 2023-08-24 12:27:44 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"StatusPatched\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update apps/v1 2023-08-24 12:27:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0050a73d8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:StatusPatched,Status:True,Reason:,Message:,LastUpdateTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:0001-01-01 00:00:00 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:FoundNewReplicaSet,Message:Found new replica set "test-deployment-4skvs-54bc444df",LastUpdateTime:2023-08-24 12:27:44 +0000 UTC,LastTransitionTime:2023-08-24 12:27:44 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + + Aug 24 12:27:44.896: INFO: New ReplicaSet "test-deployment-4skvs-54bc444df" of Deployment "test-deployment-4skvs": + &ReplicaSet{ObjectMeta:{test-deployment-4skvs-54bc444df deployment-7218 d849b5e1-12cb-486c-876c-146e16b80a09 21049 1 2023-08-24 12:27:42 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[deployment.kubernetes.io/desired-replicas:1 
deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment-4skvs f6ba29ac-5823-408c-bc41-89372e6452e5 0xc004b5e880 0xc004b5e881}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"f6ba29ac-5823-408c-bc41-89372e6452e5\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:27:44 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,pod-template-hash: 54bc444df,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004b5e928 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:27:44.907: INFO: Pod "test-deployment-4skvs-54bc444df-hlclx" is available: + &Pod{ObjectMeta:{test-deployment-4skvs-54bc444df-hlclx test-deployment-4skvs-54bc444df- deployment-7218 50c5f5b0-a375-420d-a63c-9fcdee59d57e 21048 0 2023-08-24 12:27:42 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[] [{apps/v1 ReplicaSet test-deployment-4skvs-54bc444df d849b5e1-12cb-486c-876c-146e16b80a09 0xc004b5ecf0 0xc004b5ecf1}] [] [{kube-controller-manager Update v1 2023-08-24 12:27:42 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"d849b5e1-12cb-486c-876c-146e16b80a09\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:27:44 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.19\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-bblgs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bblgs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:defa
ult,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:27:42 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.19,StartTime:2023-08-24 12:27:42 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:27:43 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://4f94a4347c3752ae3e32f6fbeea00b396ca95bd92de842b23b5800e386586382,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.19,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + [AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 - Jul 29 16:20:31.463: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] - test/e2e/scheduling/predicates.go:88 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] + Aug 24 12:27:44.909: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] + [DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] + [DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 - STEP: Destroying namespace "sched-pred-4740" for this suite. 
07/29/23 16:20:31.473 + STEP: Destroying namespace "deployment-7218" for this suite. 08/24/23 12:27:44.918 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Container Runtime blackbox test on terminated container - should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:248 -[BeforeEach] [sig-node] Container Runtime +[sig-apps] CronJob + should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] + test/e2e/apps/cronjob.go:124 +[BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:20:31.485 -Jul 29 16:20:31.485: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-runtime 07/29/23 16:20:31.487 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:31.517 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:31.522 -[BeforeEach] [sig-node] Container Runtime +STEP: Creating a kubernetes client 08/24/23 12:27:44.945 +Aug 24 12:27:44.945: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename cronjob 08/24/23 12:27:44.947 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:44.977 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:44.981 +[BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 -[It] should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:248 -STEP: create the container 07/29/23 16:20:31.527 -STEP: wait for the container to reach Succeeded 07/29/23 16:20:31.542 -STEP: get the container status 07/29/23 16:20:35.575 -STEP: the container should be terminated 07/29/23 16:20:35.582 -STEP: the termination message should be set 07/29/23 16:20:35.582 -Jul 29 16:20:35.582: INFO: Expected: &{OK} to match Container's Termination Message: OK -- -STEP: delete the container 07/29/23 16:20:35.582 -[AfterEach] [sig-node] Container Runtime +[It] should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] + test/e2e/apps/cronjob.go:124 +STEP: Creating a ForbidConcurrent cronjob 08/24/23 12:27:44.987 +STEP: Ensuring a job is scheduled 08/24/23 12:27:44.998 +STEP: Ensuring exactly one is scheduled 08/24/23 12:28:01.007 +STEP: Ensuring exactly one running job exists by listing jobs explicitly 08/24/23 12:28:01.015 +STEP: Ensuring no more jobs are scheduled 08/24/23 12:28:01.023 +STEP: Removing cronjob 08/24/23 12:33:01.037 +[AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 -Jul 29 16:20:35.623: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Container Runtime +Aug 24 12:33:01.054: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Container Runtime +[DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Container Runtime +[DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 -STEP: Destroying namespace "container-runtime-3660" for 
this suite. 07/29/23 16:20:35.641 +STEP: Destroying namespace "cronjob-6531" for this suite. 08/24/23 12:33:01.067 ------------------------------ -• [4.179 seconds] -[sig-node] Container Runtime -test/e2e/common/node/framework.go:23 - blackbox test - test/e2e/common/node/runtime.go:44 - on terminated container - test/e2e/common/node/runtime.go:137 - should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:248 +• [SLOW TEST] [316.134 seconds] +[sig-apps] CronJob +test/e2e/apps/framework.go:23 + should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] + test/e2e/apps/cronjob.go:124 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Container Runtime + [BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:20:31.485 - Jul 29 16:20:31.485: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-runtime 07/29/23 16:20:31.487 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:31.517 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:31.522 - [BeforeEach] [sig-node] Container Runtime + STEP: Creating a kubernetes client 08/24/23 12:27:44.945 + Aug 24 12:27:44.945: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename cronjob 08/24/23 12:27:44.947 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:27:44.977 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:27:44.981 + [BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 - [It] should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:248 - STEP: create the container 07/29/23 16:20:31.527 - STEP: wait for the container to reach Succeeded 07/29/23 16:20:31.542 - STEP: get the container status 07/29/23 16:20:35.575 - STEP: the container should be terminated 07/29/23 16:20:35.582 - STEP: the termination message should be set 07/29/23 16:20:35.582 - Jul 29 16:20:35.582: INFO: Expected: &{OK} to match Container's Termination Message: OK -- - STEP: delete the container 07/29/23 16:20:35.582 - [AfterEach] [sig-node] Container Runtime + [It] should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] + test/e2e/apps/cronjob.go:124 + STEP: Creating a ForbidConcurrent cronjob 08/24/23 12:27:44.987 + STEP: Ensuring a job is scheduled 08/24/23 12:27:44.998 + STEP: Ensuring exactly one is scheduled 08/24/23 12:28:01.007 + STEP: Ensuring exactly one running job exists by listing jobs explicitly 08/24/23 12:28:01.015 + STEP: Ensuring no more jobs are scheduled 08/24/23 12:28:01.023 + STEP: Removing cronjob 08/24/23 12:33:01.037 + [AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 - Jul 29 16:20:35.623: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Container Runtime + Aug 24 12:33:01.054: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Container Runtime + [DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 - [DeferCleanup (Each)] 
[sig-node] Container Runtime + [DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 - STEP: Destroying namespace "container-runtime-3660" for this suite. 07/29/23 16:20:35.641 + STEP: Destroying namespace "cronjob-6531" for this suite. 08/24/23 12:33:01.067 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a service. [Conformance] - test/e2e/apimachinery/resource_quota.go:100 -[BeforeEach] [sig-api-machinery] ResourceQuota +[sig-network] EndpointSlice + should have Endpoints and EndpointSlices pointing to API Server [Conformance] + test/e2e/network/endpointslice.go:66 +[BeforeEach] [sig-network] EndpointSlice set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:20:35.668 -Jul 29 16:20:35.669: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 16:20:35.67 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:35.77 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:35.779 -[BeforeEach] [sig-api-machinery] ResourceQuota +STEP: Creating a kubernetes client 08/24/23 12:33:01.095 +Aug 24 12:33:01.095: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename endpointslice 08/24/23 12:33:01.098 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:01.148 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:01.154 +[BeforeEach] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:31 -[It] should create a ResourceQuota and capture the life of a service. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:100 -STEP: Counting existing ResourceQuota 07/29/23 16:20:35.786 -STEP: Creating a ResourceQuota 07/29/23 16:20:40.794 -STEP: Ensuring resource quota status is calculated 07/29/23 16:20:40.807 -STEP: Creating a Service 07/29/23 16:20:42.816 -STEP: Creating a NodePort Service 07/29/23 16:20:42.847 -STEP: Not allowing a LoadBalancer Service with NodePort to be created that exceeds remaining quota 07/29/23 16:20:42.893 -STEP: Ensuring resource quota status captures service creation 07/29/23 16:20:42.961 -STEP: Deleting Services 07/29/23 16:20:44.968 -STEP: Ensuring resource quota status released usage 07/29/23 16:20:45.036 -[AfterEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-network] EndpointSlice + test/e2e/network/endpointslice.go:52 +[It] should have Endpoints and EndpointSlices pointing to API Server [Conformance] + test/e2e/network/endpointslice.go:66 +Aug 24 12:33:01.206: INFO: Endpoints addresses: [192.168.121.111 192.168.121.127] , ports: [6443] +Aug 24 12:33:01.206: INFO: EndpointSlices addresses: [192.168.121.111 192.168.121.127] , ports: [6443] +[AfterEach] [sig-network] EndpointSlice test/e2e/framework/node/init/init.go:32 -Jul 29 16:20:47.045: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 12:33:01.206: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-network] EndpointSlice dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-network] EndpointSlice tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-7469" for this suite. 07/29/23 16:20:47.056 +STEP: Destroying namespace "endpointslice-9154" for this suite. 08/24/23 12:33:01.216 ------------------------------ -• [SLOW TEST] [11.402 seconds] -[sig-api-machinery] ResourceQuota -test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a service. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:100 +• [0.143 seconds] +[sig-network] EndpointSlice +test/e2e/network/common/framework.go:23 + should have Endpoints and EndpointSlices pointing to API Server [Conformance] + test/e2e/network/endpointslice.go:66 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-network] EndpointSlice set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:20:35.668 - Jul 29 16:20:35.669: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 16:20:35.67 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:35.77 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:35.779 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 12:33:01.095 + Aug 24 12:33:01.095: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename endpointslice 08/24/23 12:33:01.098 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:01.148 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:01.154 + [BeforeEach] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:31 - [It] should create a ResourceQuota and capture the life of a service. [Conformance] - test/e2e/apimachinery/resource_quota.go:100 - STEP: Counting existing ResourceQuota 07/29/23 16:20:35.786 - STEP: Creating a ResourceQuota 07/29/23 16:20:40.794 - STEP: Ensuring resource quota status is calculated 07/29/23 16:20:40.807 - STEP: Creating a Service 07/29/23 16:20:42.816 - STEP: Creating a NodePort Service 07/29/23 16:20:42.847 - STEP: Not allowing a LoadBalancer Service with NodePort to be created that exceeds remaining quota 07/29/23 16:20:42.893 - STEP: Ensuring resource quota status captures service creation 07/29/23 16:20:42.961 - STEP: Deleting Services 07/29/23 16:20:44.968 - STEP: Ensuring resource quota status released usage 07/29/23 16:20:45.036 - [AfterEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-network] EndpointSlice + test/e2e/network/endpointslice.go:52 + [It] should have Endpoints and EndpointSlices pointing to API Server [Conformance] + test/e2e/network/endpointslice.go:66 + Aug 24 12:33:01.206: INFO: Endpoints addresses: [192.168.121.111 192.168.121.127] , ports: [6443] + Aug 24 12:33:01.206: INFO: EndpointSlices addresses: [192.168.121.111 192.168.121.127] , ports: [6443] + [AfterEach] [sig-network] EndpointSlice test/e2e/framework/node/init/init.go:32 - Jul 29 16:20:47.045: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 12:33:01.206: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-network] EndpointSlice dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-network] EndpointSlice tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-7469" for this suite. 07/29/23 16:20:47.056 + STEP: Destroying namespace "endpointslice-9154" for this suite. 
08/24/23 12:33:01.216 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS +SSSSSSSSSS ------------------------------ -[sig-node] ConfigMap - should fail to create ConfigMap with empty key [Conformance] - test/e2e/common/node/configmap.go:138 -[BeforeEach] [sig-node] ConfigMap +[sig-cli] Kubectl client Kubectl version + should check is all data is printed [Conformance] + test/e2e/kubectl/kubectl.go:1685 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:20:47.072 -Jul 29 16:20:47.072: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:20:47.075 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:47.1 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:47.105 -[BeforeEach] [sig-node] ConfigMap +STEP: Creating a kubernetes client 08/24/23 12:33:01.242 +Aug 24 12:33:01.243: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:33:01.245 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:01.278 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:01.283 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[It] should fail to create ConfigMap with empty key [Conformance] - test/e2e/common/node/configmap.go:138 -STEP: Creating configMap that has name configmap-test-emptyKey-7009ca45-e55b-4285-88fb-c0c417cf1c7e 07/29/23 16:20:47.11 -[AfterEach] [sig-node] ConfigMap +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[It] should check is all data is printed [Conformance] + test/e2e/kubectl/kubectl.go:1685 +Aug 24 12:33:01.289: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5078 version' +Aug 24 12:33:01.468: INFO: stderr: "WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.\n" +Aug 24 12:33:01.468: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"26\", GitVersion:\"v1.26.8\", GitCommit:\"395f0a2fdc940aeb9ab88849e8fa4321decbf6e1\", GitTreeState:\"clean\", BuildDate:\"2023-08-24T00:50:44Z\", GoVersion:\"go1.20.7\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nKustomize Version: v4.5.7\nServer Version: version.Info{Major:\"1\", Minor:\"26\", GitVersion:\"v1.26.8\", GitCommit:\"395f0a2fdc940aeb9ab88849e8fa4321decbf6e1\", GitTreeState:\"clean\", BuildDate:\"2023-08-24T00:43:07Z\", GoVersion:\"go1.20.7\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 16:20:47.113: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] ConfigMap +Aug 24 12:33:01.469: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] ConfigMap +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] ConfigMap +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-2095" for this suite. 07/29/23 16:20:47.123 +STEP: Destroying namespace "kubectl-5078" for this suite. 
08/24/23 12:33:01.476 ------------------------------ -• [0.064 seconds] -[sig-node] ConfigMap -test/e2e/common/node/framework.go:23 - should fail to create ConfigMap with empty key [Conformance] - test/e2e/common/node/configmap.go:138 +• [0.247 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Kubectl version + test/e2e/kubectl/kubectl.go:1679 + should check is all data is printed [Conformance] + test/e2e/kubectl/kubectl.go:1685 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] ConfigMap + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:20:47.072 - Jul 29 16:20:47.072: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:20:47.075 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:47.1 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:47.105 - [BeforeEach] [sig-node] ConfigMap + STEP: Creating a kubernetes client 08/24/23 12:33:01.242 + Aug 24 12:33:01.243: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:33:01.245 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:01.278 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:01.283 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [It] should fail to create ConfigMap with empty key [Conformance] - test/e2e/common/node/configmap.go:138 - STEP: Creating configMap that has name configmap-test-emptyKey-7009ca45-e55b-4285-88fb-c0c417cf1c7e 07/29/23 16:20:47.11 - [AfterEach] [sig-node] ConfigMap + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [It] should check is all data is printed [Conformance] + test/e2e/kubectl/kubectl.go:1685 + Aug 24 12:33:01.289: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5078 version' + Aug 24 12:33:01.468: INFO: stderr: "WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. 
Use --output=yaml|json to get the full version.\n" + Aug 24 12:33:01.468: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"26\", GitVersion:\"v1.26.8\", GitCommit:\"395f0a2fdc940aeb9ab88849e8fa4321decbf6e1\", GitTreeState:\"clean\", BuildDate:\"2023-08-24T00:50:44Z\", GoVersion:\"go1.20.7\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nKustomize Version: v4.5.7\nServer Version: version.Info{Major:\"1\", Minor:\"26\", GitVersion:\"v1.26.8\", GitCommit:\"395f0a2fdc940aeb9ab88849e8fa4321decbf6e1\", GitTreeState:\"clean\", BuildDate:\"2023-08-24T00:43:07Z\", GoVersion:\"go1.20.7\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 16:20:47.113: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] ConfigMap + Aug 24 12:33:01.469: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] ConfigMap + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] ConfigMap + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-2095" for this suite. 07/29/23 16:20:47.123 + STEP: Destroying namespace "kubectl-5078" for this suite. 08/24/23 12:33:01.476 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS ------------------------------- -[sig-storage] Downward API volume - should provide container's cpu limit [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:193 -[BeforeEach] [sig-storage] Downward API volume +SSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl api-versions + should check if v1 is in available api versions [Conformance] + test/e2e/kubectl/kubectl.go:824 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:20:47.141 -Jul 29 16:20:47.141: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:20:47.142 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:47.17 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:47.174 -[BeforeEach] [sig-storage] Downward API volume +STEP: Creating a kubernetes client 08/24/23 12:33:01.49 +Aug 24 12:33:01.490: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:33:01.491 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:01.517 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:01.521 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 -[It] should provide container's cpu limit [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:193 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:20:47.179 -Jul 29 16:20:47.199: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627" in namespace "downward-api-3085" to be "Succeeded or Failed" -Jul 29 16:20:47.210: INFO: Pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627": Phase="Pending", 
Reason="", readiness=false. Elapsed: 11.092903ms -Jul 29 16:20:49.220: INFO: Pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021002292s -Jul 29 16:20:51.220: INFO: Pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02087271s -STEP: Saw pod success 07/29/23 16:20:51.22 -Jul 29 16:20:51.221: INFO: Pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627" satisfied condition "Succeeded or Failed" -Jul 29 16:20:51.228: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627 container client-container: -STEP: delete the pod 07/29/23 16:20:51.266 -Jul 29 16:20:51.294: INFO: Waiting for pod downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627 to disappear -Jul 29 16:20:51.305: INFO: Pod downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627 no longer exists -[AfterEach] [sig-storage] Downward API volume +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[It] should check if v1 is in available api versions [Conformance] + test/e2e/kubectl/kubectl.go:824 +STEP: validating api versions 08/24/23 12:33:01.525 +Aug 24 12:33:01.526: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9418 api-versions' +Aug 24 12:33:01.736: INFO: stderr: "" +Aug 24 12:33:01.736: INFO: stdout: "admissionregistration.k8s.io/v1\napiextensions.k8s.io/v1\napiregistration.k8s.io/v1\napps/v1\nauthentication.k8s.io/v1\nauthorization.k8s.io/v1\nautoscaling/v1\nautoscaling/v2\nbatch/v1\ncertificates.k8s.io/v1\ncilium.io/v2\ncilium.io/v2alpha1\ncoordination.k8s.io/v1\ndiscovery.k8s.io/v1\nevents.k8s.io/v1\nflowcontrol.apiserver.k8s.io/v1beta2\nflowcontrol.apiserver.k8s.io/v1beta3\nnetworking.k8s.io/v1\nnode.k8s.io/v1\npolicy/v1\nrbac.authorization.k8s.io/v1\nscheduling.k8s.io/v1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 16:20:51.305: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume +Aug 24 12:33:01.736: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-3085" for this suite. 07/29/23 16:20:51.317 +STEP: Destroying namespace "kubectl-9418" for this suite. 
08/24/23 12:33:01.749 ------------------------------ -• [4.192 seconds] -[sig-storage] Downward API volume -test/e2e/common/storage/framework.go:23 - should provide container's cpu limit [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:193 +• [0.275 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Kubectl api-versions + test/e2e/kubectl/kubectl.go:818 + should check if v1 is in available api versions [Conformance] + test/e2e/kubectl/kubectl.go:824 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:20:47.141 - Jul 29 16:20:47.141: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:20:47.142 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:47.17 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:47.174 - [BeforeEach] [sig-storage] Downward API volume + STEP: Creating a kubernetes client 08/24/23 12:33:01.49 + Aug 24 12:33:01.490: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:33:01.491 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:01.517 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:01.521 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 - [It] should provide container's cpu limit [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:193 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:20:47.179 - Jul 29 16:20:47.199: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627" in namespace "downward-api-3085" to be "Succeeded or Failed" - Jul 29 16:20:47.210: INFO: Pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627": Phase="Pending", Reason="", readiness=false. Elapsed: 11.092903ms - Jul 29 16:20:49.220: INFO: Pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021002292s - Jul 29 16:20:51.220: INFO: Pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02087271s - STEP: Saw pod success 07/29/23 16:20:51.22 - Jul 29 16:20:51.221: INFO: Pod "downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627" satisfied condition "Succeeded or Failed" - Jul 29 16:20:51.228: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627 container client-container: - STEP: delete the pod 07/29/23 16:20:51.266 - Jul 29 16:20:51.294: INFO: Waiting for pod downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627 to disappear - Jul 29 16:20:51.305: INFO: Pod downwardapi-volume-9bad8118-7f8c-497c-91f5-6cf3e6f7c627 no longer exists - [AfterEach] [sig-storage] Downward API volume + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [It] should check if v1 is in available api versions [Conformance] + test/e2e/kubectl/kubectl.go:824 + STEP: validating api versions 08/24/23 12:33:01.525 + Aug 24 12:33:01.526: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-9418 api-versions' + Aug 24 12:33:01.736: INFO: stderr: "" + Aug 24 12:33:01.736: INFO: stdout: "admissionregistration.k8s.io/v1\napiextensions.k8s.io/v1\napiregistration.k8s.io/v1\napps/v1\nauthentication.k8s.io/v1\nauthorization.k8s.io/v1\nautoscaling/v1\nautoscaling/v2\nbatch/v1\ncertificates.k8s.io/v1\ncilium.io/v2\ncilium.io/v2alpha1\ncoordination.k8s.io/v1\ndiscovery.k8s.io/v1\nevents.k8s.io/v1\nflowcontrol.apiserver.k8s.io/v1beta2\nflowcontrol.apiserver.k8s.io/v1beta3\nnetworking.k8s.io/v1\nnode.k8s.io/v1\npolicy/v1\nrbac.authorization.k8s.io/v1\nscheduling.k8s.io/v1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 16:20:51.305: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume + Aug 24 12:33:01.736: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-3085" for this suite. 07/29/23 16:20:51.317 + STEP: Destroying namespace "kubectl-9418" for this suite. 08/24/23 12:33:01.749 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] ResourceQuota - should apply changes to a resourcequota status [Conformance] - test/e2e/apimachinery/resource_quota.go:1010 + should create a ResourceQuota and capture the life of a configMap. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:326 [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:20:51.338 -Jul 29 16:20:51.338: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 16:20:51.341 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:51.375 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:51.38 +STEP: Creating a kubernetes client 08/24/23 12:33:01.768 +Aug 24 12:33:01.768: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:33:01.769 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:01.798 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:01.804 [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[It] should apply changes to a resourcequota status [Conformance] - test/e2e/apimachinery/resource_quota.go:1010 -STEP: Creating resourceQuota "e2e-rq-status-qprng" 07/29/23 16:20:51.391 -Jul 29 16:20:51.408: INFO: Resource quota "e2e-rq-status-qprng" reports spec: hard cpu limit of 500m -Jul 29 16:20:51.408: INFO: Resource quota "e2e-rq-status-qprng" reports spec: hard memory limit of 500Mi -STEP: Updating resourceQuota "e2e-rq-status-qprng" /status 07/29/23 16:20:51.409 -STEP: Confirm /status for "e2e-rq-status-qprng" resourceQuota via watch 07/29/23 16:20:51.443 -Jul 29 16:20:51.447: INFO: observed resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList(nil) -Jul 29 16:20:51.448: INFO: Found resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:500, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:524288000, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500Mi", Format:"BinarySI"}} -Jul 29 16:20:51.448: INFO: ResourceQuota "e2e-rq-status-qprng" /status was updated -STEP: Patching hard spec values for cpu & memory 07/29/23 16:20:51.454 -Jul 29 16:20:51.469: INFO: Resource quota "e2e-rq-status-qprng" reports spec: hard cpu limit of 1 -Jul 29 16:20:51.469: INFO: Resource quota "e2e-rq-status-qprng" reports spec: hard memory limit of 1Gi -STEP: Patching "e2e-rq-status-qprng" /status 07/29/23 16:20:51.469 -STEP: Confirm /status for "e2e-rq-status-qprng" resourceQuota via watch 07/29/23 16:20:51.48 -Jul 29 16:20:51.483: INFO: observed resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:500, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:524288000, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500Mi", Format:"BinarySI"}} -Jul 29 16:20:51.483: INFO: Found resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:1, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:1073741824, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, 
s:"1Gi", Format:"BinarySI"}} -Jul 29 16:20:51.483: INFO: ResourceQuota "e2e-rq-status-qprng" /status was patched -STEP: Get "e2e-rq-status-qprng" /status 07/29/23 16:20:51.483 -Jul 29 16:20:51.491: INFO: Resourcequota "e2e-rq-status-qprng" reports status: hard cpu of 1 -Jul 29 16:20:51.491: INFO: Resourcequota "e2e-rq-status-qprng" reports status: hard memory of 1Gi -STEP: Repatching "e2e-rq-status-qprng" /status before checking Spec is unchanged 07/29/23 16:20:51.5 -Jul 29 16:20:51.513: INFO: Resourcequota "e2e-rq-status-qprng" reports status: hard cpu of 2 -Jul 29 16:20:51.513: INFO: Resourcequota "e2e-rq-status-qprng" reports status: hard memory of 2Gi -Jul 29 16:20:51.517: INFO: observed resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:1, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:1073741824, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1Gi", Format:"BinarySI"}} -Jul 29 16:20:51.518: INFO: Found resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:2, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"2", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:2147483648, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"2Gi", Format:"BinarySI"}} -Jul 29 16:24:01.536: INFO: ResourceQuota "e2e-rq-status-qprng" Spec was unchanged and /status reset +[It] should create a ResourceQuota and capture the life of a configMap. [Conformance] + test/e2e/apimachinery/resource_quota.go:326 +STEP: Counting existing ResourceQuota 08/24/23 12:33:18.816 +STEP: Creating a ResourceQuota 08/24/23 12:33:23.823 +STEP: Ensuring resource quota status is calculated 08/24/23 12:33:23.832 +STEP: Creating a ConfigMap 08/24/23 12:33:25.842 +STEP: Ensuring resource quota status captures configMap creation 08/24/23 12:33:25.863 +STEP: Deleting a ConfigMap 08/24/23 12:33:27.87 +STEP: Ensuring resource quota status released usage 08/24/23 12:33:27.879 [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:24:01.537: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:33:29.889: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-3084" for this suite. 07/29/23 16:24:01.557 +STEP: Destroying namespace "resourcequota-3442" for this suite. 08/24/23 12:33:29.898 ------------------------------ -• [SLOW TEST] [190.238 seconds] +• [SLOW TEST] [28.143 seconds] [sig-api-machinery] ResourceQuota test/e2e/apimachinery/framework.go:23 - should apply changes to a resourcequota status [Conformance] - test/e2e/apimachinery/resource_quota.go:1010 + should create a ResourceQuota and capture the life of a configMap. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:326 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:20:51.338 - Jul 29 16:20:51.338: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 16:20:51.341 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:20:51.375 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:20:51.38 + STEP: Creating a kubernetes client 08/24/23 12:33:01.768 + Aug 24 12:33:01.768: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:33:01.769 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:01.798 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:01.804 [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [It] should apply changes to a resourcequota status [Conformance] - test/e2e/apimachinery/resource_quota.go:1010 - STEP: Creating resourceQuota "e2e-rq-status-qprng" 07/29/23 16:20:51.391 - Jul 29 16:20:51.408: INFO: Resource quota "e2e-rq-status-qprng" reports spec: hard cpu limit of 500m - Jul 29 16:20:51.408: INFO: Resource quota "e2e-rq-status-qprng" reports spec: hard memory limit of 500Mi - STEP: Updating resourceQuota "e2e-rq-status-qprng" /status 07/29/23 16:20:51.409 - STEP: Confirm /status for "e2e-rq-status-qprng" resourceQuota via watch 07/29/23 16:20:51.443 - Jul 29 16:20:51.447: INFO: observed resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList(nil) - Jul 29 16:20:51.448: INFO: Found resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:500, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:524288000, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500Mi", Format:"BinarySI"}} - Jul 29 16:20:51.448: INFO: ResourceQuota "e2e-rq-status-qprng" /status was updated - STEP: Patching hard spec values for cpu & memory 07/29/23 16:20:51.454 - Jul 29 16:20:51.469: INFO: Resource quota "e2e-rq-status-qprng" reports spec: hard cpu limit of 1 - Jul 29 16:20:51.469: INFO: Resource quota "e2e-rq-status-qprng" reports spec: hard memory limit of 1Gi - STEP: Patching "e2e-rq-status-qprng" /status 07/29/23 16:20:51.469 - STEP: Confirm /status for "e2e-rq-status-qprng" resourceQuota via watch 07/29/23 16:20:51.48 - Jul 29 16:20:51.483: INFO: observed resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:500, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:524288000, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500Mi", Format:"BinarySI"}} - Jul 29 16:20:51.483: INFO: Found resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:1, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1", Format:"DecimalSI"}, 
"memory":resource.Quantity{i:resource.int64Amount{value:1073741824, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1Gi", Format:"BinarySI"}} - Jul 29 16:20:51.483: INFO: ResourceQuota "e2e-rq-status-qprng" /status was patched - STEP: Get "e2e-rq-status-qprng" /status 07/29/23 16:20:51.483 - Jul 29 16:20:51.491: INFO: Resourcequota "e2e-rq-status-qprng" reports status: hard cpu of 1 - Jul 29 16:20:51.491: INFO: Resourcequota "e2e-rq-status-qprng" reports status: hard memory of 1Gi - STEP: Repatching "e2e-rq-status-qprng" /status before checking Spec is unchanged 07/29/23 16:20:51.5 - Jul 29 16:20:51.513: INFO: Resourcequota "e2e-rq-status-qprng" reports status: hard cpu of 2 - Jul 29 16:20:51.513: INFO: Resourcequota "e2e-rq-status-qprng" reports status: hard memory of 2Gi - Jul 29 16:20:51.517: INFO: observed resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:1, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:1073741824, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1Gi", Format:"BinarySI"}} - Jul 29 16:20:51.518: INFO: Found resourceQuota "e2e-rq-status-qprng" in namespace "resourcequota-3084" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:2, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"2", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:2147483648, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"2Gi", Format:"BinarySI"}} - Jul 29 16:24:01.536: INFO: ResourceQuota "e2e-rq-status-qprng" Spec was unchanged and /status reset + [It] should create a ResourceQuota and capture the life of a configMap. [Conformance] + test/e2e/apimachinery/resource_quota.go:326 + STEP: Counting existing ResourceQuota 08/24/23 12:33:18.816 + STEP: Creating a ResourceQuota 08/24/23 12:33:23.823 + STEP: Ensuring resource quota status is calculated 08/24/23 12:33:23.832 + STEP: Creating a ConfigMap 08/24/23 12:33:25.842 + STEP: Ensuring resource quota status captures configMap creation 08/24/23 12:33:25.863 + STEP: Deleting a ConfigMap 08/24/23 12:33:27.87 + STEP: Ensuring resource quota status released usage 08/24/23 12:33:27.879 [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:24:01.537: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:33:29.889: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-3084" for this suite. 07/29/23 16:24:01.557 + STEP: Destroying namespace "resourcequota-3442" for this suite. 
08/24/23 12:33:29.898 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:99 -[BeforeEach] [sig-storage] Projected configMap +[sig-storage] ConfigMap + should be immutable if `immutable` field is set [Conformance] + test/e2e/common/storage/configmap_volume.go:504 +[BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:24:01.582 -Jul 29 16:24:01.582: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:24:01.591 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:24:01.636 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:24:01.643 -[BeforeEach] [sig-storage] Projected configMap +STEP: Creating a kubernetes client 08/24/23 12:33:29.918 +Aug 24 12:33:29.918: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 12:33:29.921 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:29.955 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:29.964 +[BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:99 -STEP: Creating configMap with name projected-configmap-test-volume-map-fba9daa2-3e96-4f32-b153-e9fc7634baf3 07/29/23 16:24:01.647 -STEP: Creating a pod to test consume configMaps 07/29/23 16:24:01.655 -Jul 29 16:24:01.672: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393" in namespace "projected-1021" to be "Succeeded or Failed" -Jul 29 16:24:01.678: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393": Phase="Pending", Reason="", readiness=false. Elapsed: 5.191964ms -Jul 29 16:24:03.687: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393": Phase="Running", Reason="", readiness=true. Elapsed: 2.015037553s -Jul 29 16:24:05.687: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393": Phase="Running", Reason="", readiness=false. Elapsed: 4.014572494s -Jul 29 16:24:07.686: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.01398786s -STEP: Saw pod success 07/29/23 16:24:07.687 -Jul 29 16:24:07.687: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393" satisfied condition "Succeeded or Failed" -Jul 29 16:24:07.695: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393 container agnhost-container: -STEP: delete the pod 07/29/23 16:24:07.726 -Jul 29 16:24:07.757: INFO: Waiting for pod pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393 to disappear -Jul 29 16:24:07.766: INFO: Pod pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393 no longer exists -[AfterEach] [sig-storage] Projected configMap +[It] should be immutable if `immutable` field is set [Conformance] + test/e2e/common/storage/configmap_volume.go:504 +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:24:07.766: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected configMap +Aug 24 12:33:30.058: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "projected-1021" for this suite. 07/29/23 16:24:07.781 +STEP: Destroying namespace "configmap-3716" for this suite. 08/24/23 12:33:30.073 ------------------------------ -• [SLOW TEST] [6.212 seconds] -[sig-storage] Projected configMap +• [0.178 seconds] +[sig-storage] ConfigMap test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:99 + should be immutable if `immutable` field is set [Conformance] + test/e2e/common/storage/configmap_volume.go:504 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected configMap + [BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:24:01.582 - Jul 29 16:24:01.582: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:24:01.591 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:24:01.636 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:24:01.643 - [BeforeEach] [sig-storage] Projected configMap + STEP: Creating a kubernetes client 08/24/23 12:33:29.918 + Aug 24 12:33:29.918: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 12:33:29.921 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:29.955 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:29.964 + [BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:99 - STEP: Creating configMap with name projected-configmap-test-volume-map-fba9daa2-3e96-4f32-b153-e9fc7634baf3 07/29/23 16:24:01.647 - STEP: Creating a pod to test consume 
configMaps 07/29/23 16:24:01.655 - Jul 29 16:24:01.672: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393" in namespace "projected-1021" to be "Succeeded or Failed" - Jul 29 16:24:01.678: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393": Phase="Pending", Reason="", readiness=false. Elapsed: 5.191964ms - Jul 29 16:24:03.687: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393": Phase="Running", Reason="", readiness=true. Elapsed: 2.015037553s - Jul 29 16:24:05.687: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393": Phase="Running", Reason="", readiness=false. Elapsed: 4.014572494s - Jul 29 16:24:07.686: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.01398786s - STEP: Saw pod success 07/29/23 16:24:07.687 - Jul 29 16:24:07.687: INFO: Pod "pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393" satisfied condition "Succeeded or Failed" - Jul 29 16:24:07.695: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393 container agnhost-container: - STEP: delete the pod 07/29/23 16:24:07.726 - Jul 29 16:24:07.757: INFO: Waiting for pod pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393 to disappear - Jul 29 16:24:07.766: INFO: Pod pod-projected-configmaps-494d62ec-274c-44b9-8293-fa7d35539393 no longer exists - [AfterEach] [sig-storage] Projected configMap + [It] should be immutable if `immutable` field is set [Conformance] + test/e2e/common/storage/configmap_volume.go:504 + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:24:07.766: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected configMap + Aug 24 12:33:30.058: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "projected-1021" for this suite. 07/29/23 16:24:07.781 + STEP: Destroying namespace "configmap-3716" for this suite. 
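Aside: the immutable field exercised here marks a ConfigMap read-only after creation — the API server rejects any change to its data, and the field itself cannot be unset; the object can only be deleted and recreated. A minimal sketch (hypothetical name):

apiVersion: v1
kind: ConfigMap
metadata:
  name: demo-immutable    # hypothetical name
data:
  key: value
immutable: true           # updates to data/binaryData are rejected; immutable cannot be reverted

Immutability also lets the kubelet stop watching such ConfigMaps, which reduces apiserver load in large clusters.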
08/24/23 12:33:30.073 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Proxy version v1 - A set of valid responses are returned for both pod and service Proxy [Conformance] - test/e2e/network/proxy.go:380 -[BeforeEach] version v1 +[sig-network] Services + should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] + test/e2e/network/service.go:2213 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:24:07.8 -Jul 29 16:24:07.800: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename proxy 07/29/23 16:24:07.805 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:24:07.842 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:24:07.847 -[BeforeEach] version v1 +STEP: Creating a kubernetes client 08/24/23 12:33:30.097 +Aug 24 12:33:30.098: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 12:33:30.1 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:30.184 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:30.191 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[It] A set of valid responses are returned for both pod and service Proxy [Conformance] - test/e2e/network/proxy.go:380 -Jul 29 16:24:07.854: INFO: Creating pod... -Jul 29 16:24:07.870: INFO: Waiting up to 5m0s for pod "agnhost" in namespace "proxy-4946" to be "running" -Jul 29 16:24:07.878: INFO: Pod "agnhost": Phase="Pending", Reason="", readiness=false. Elapsed: 7.960527ms -Jul 29 16:24:09.888: INFO: Pod "agnhost": Phase="Running", Reason="", readiness=true. Elapsed: 2.018114639s -Jul 29 16:24:09.888: INFO: Pod "agnhost" satisfied condition "running" -Jul 29 16:24:09.888: INFO: Creating service... 
-Jul 29 16:24:09.912: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=DELETE -Jul 29 16:24:09.930: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE -Jul 29 16:24:09.931: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=OPTIONS -Jul 29 16:24:09.940: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS -Jul 29 16:24:09.940: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=PATCH -Jul 29 16:24:09.950: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH -Jul 29 16:24:09.950: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=POST -Jul 29 16:24:09.962: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST -Jul 29 16:24:09.962: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=PUT -Jul 29 16:24:09.971: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT -Jul 29 16:24:09.972: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=DELETE -Jul 29 16:24:09.989: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE -Jul 29 16:24:09.990: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=OPTIONS -Jul 29 16:24:10.006: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS -Jul 29 16:24:10.007: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=PATCH -Jul 29 16:24:10.023: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH -Jul 29 16:24:10.024: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=POST -Jul 29 16:24:10.050: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST -Jul 29 16:24:10.050: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=PUT -Jul 29 16:24:10.063: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT -Jul 29 16:24:10.063: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=GET -Jul 29 16:24:10.092: INFO: http.Client request:GET StatusCode:301 -Jul 29 16:24:10.092: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=GET -Jul 29 16:24:10.105: INFO: http.Client request:GET StatusCode:301 -Jul 29 16:24:10.105: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=HEAD -Jul 29 16:24:10.111: INFO: http.Client request:HEAD StatusCode:301 -Jul 29 16:24:10.111: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=HEAD -Jul 29 16:24:10.119: INFO: http.Client request:HEAD StatusCode:301 -[AfterEach] version v1 +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] + 
test/e2e/network/service.go:2213 +STEP: creating service in namespace services-1408 08/24/23 12:33:30.198 +STEP: creating service affinity-clusterip-transition in namespace services-1408 08/24/23 12:33:30.198 +STEP: creating replication controller affinity-clusterip-transition in namespace services-1408 08/24/23 12:33:30.223 +I0824 12:33:30.244093 14 runners.go:193] Created replication controller with name: affinity-clusterip-transition, namespace: services-1408, replica count: 3 +I0824 12:33:33.306571 14 runners.go:193] affinity-clusterip-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Aug 24 12:33:33.322: INFO: Creating new exec pod +Aug 24 12:33:33.354: INFO: Waiting up to 5m0s for pod "execpod-affinitykzgxf" in namespace "services-1408" to be "running" +Aug 24 12:33:33.376: INFO: Pod "execpod-affinitykzgxf": Phase="Pending", Reason="", readiness=false. Elapsed: 21.510139ms +Aug 24 12:33:35.386: INFO: Pod "execpod-affinitykzgxf": Phase="Running", Reason="", readiness=true. Elapsed: 2.032392737s +Aug 24 12:33:35.387: INFO: Pod "execpod-affinitykzgxf" satisfied condition "running" +Aug 24 12:33:36.388: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1408 exec execpod-affinitykzgxf -- /bin/sh -x -c nc -v -z -w 2 affinity-clusterip-transition 80' +Aug 24 12:33:36.699: INFO: stderr: "+ nc -v -z -w 2 affinity-clusterip-transition 80\nConnection to affinity-clusterip-transition 80 port [tcp/http] succeeded!\n" +Aug 24 12:33:36.699: INFO: stdout: "" +Aug 24 12:33:36.700: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1408 exec execpod-affinitykzgxf -- /bin/sh -x -c nc -v -z -w 2 10.233.60.140 80' +Aug 24 12:33:36.974: INFO: stderr: "+ nc -v -z -w 2 10.233.60.140 80\nConnection to 10.233.60.140 80 port [tcp/http] succeeded!\n" +Aug 24 12:33:36.974: INFO: stdout: "" +Aug 24 12:33:36.994: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1408 exec execpod-affinitykzgxf -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.60.140:80/ ; done' +Aug 24 12:33:37.460: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n" +Aug 24 12:33:37.460: INFO: stdout: 
"\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-bbjv5\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-bbjv5\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv" +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bbjv5 +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bbjv5 +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv +Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv +Aug 24 12:33:37.474: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1408 exec execpod-affinitykzgxf -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.60.140:80/ ; done' +Aug 24 12:33:37.964: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n" +Aug 24 12:33:37.964: INFO: stdout: 
"\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9" +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 +Aug 24 12:33:37.964: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-clusterip-transition in namespace services-1408, will wait for the garbage collector to delete the pods 08/24/23 12:33:37.986 +Aug 24 12:33:38.056: INFO: Deleting ReplicationController affinity-clusterip-transition took: 12.376733ms +Aug 24 12:33:38.157: INFO: Terminating ReplicationController affinity-clusterip-transition pods took: 101.024964ms +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 16:24:10.119: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] version v1 +Aug 24 12:33:40.501: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] version v1 +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] version v1 +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "proxy-4946" for this suite. 07/29/23 16:24:10.127 +STEP: Destroying namespace "services-1408" for this suite. 
08/24/23 12:33:40.511 ------------------------------ -• [2.342 seconds] -[sig-network] Proxy +• [SLOW TEST] [10.447 seconds] +[sig-network] Services test/e2e/network/common/framework.go:23 - version v1 - test/e2e/network/proxy.go:74 - A set of valid responses are returned for both pod and service Proxy [Conformance] - test/e2e/network/proxy.go:380 + should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] + test/e2e/network/service.go:2213 Begin Captured GinkgoWriter Output >> - [BeforeEach] version v1 + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:24:07.8 - Jul 29 16:24:07.800: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename proxy 07/29/23 16:24:07.805 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:24:07.842 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:24:07.847 - [BeforeEach] version v1 + STEP: Creating a kubernetes client 08/24/23 12:33:30.097 + Aug 24 12:33:30.098: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 12:33:30.1 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:30.184 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:30.191 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [It] A set of valid responses are returned for both pod and service Proxy [Conformance] - test/e2e/network/proxy.go:380 - Jul 29 16:24:07.854: INFO: Creating pod... - Jul 29 16:24:07.870: INFO: Waiting up to 5m0s for pod "agnhost" in namespace "proxy-4946" to be "running" - Jul 29 16:24:07.878: INFO: Pod "agnhost": Phase="Pending", Reason="", readiness=false. Elapsed: 7.960527ms - Jul 29 16:24:09.888: INFO: Pod "agnhost": Phase="Running", Reason="", readiness=true. Elapsed: 2.018114639s - Jul 29 16:24:09.888: INFO: Pod "agnhost" satisfied condition "running" - Jul 29 16:24:09.888: INFO: Creating service... 
- Jul 29 16:24:09.912: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=DELETE - Jul 29 16:24:09.930: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE - Jul 29 16:24:09.931: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=OPTIONS - Jul 29 16:24:09.940: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS - Jul 29 16:24:09.940: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=PATCH - Jul 29 16:24:09.950: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH - Jul 29 16:24:09.950: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=POST - Jul 29 16:24:09.962: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST - Jul 29 16:24:09.962: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=PUT - Jul 29 16:24:09.971: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT - Jul 29 16:24:09.972: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=DELETE - Jul 29 16:24:09.989: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE - Jul 29 16:24:09.990: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=OPTIONS - Jul 29 16:24:10.006: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS - Jul 29 16:24:10.007: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=PATCH - Jul 29 16:24:10.023: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH - Jul 29 16:24:10.024: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=POST - Jul 29 16:24:10.050: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST - Jul 29 16:24:10.050: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=PUT - Jul 29 16:24:10.063: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT - Jul 29 16:24:10.063: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=GET - Jul 29 16:24:10.092: INFO: http.Client request:GET StatusCode:301 - Jul 29 16:24:10.092: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=GET - Jul 29 16:24:10.105: INFO: http.Client request:GET StatusCode:301 - Jul 29 16:24:10.105: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/pods/agnhost/proxy?method=HEAD - Jul 29 16:24:10.111: INFO: http.Client request:HEAD StatusCode:301 - Jul 29 16:24:10.111: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4946/services/e2e-proxy-test-service/proxy?method=HEAD - Jul 29 16:24:10.119: INFO: http.Client request:HEAD StatusCode:301 - [AfterEach] version v1 + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should be able to switch session affinity for service with type clusterIP [LinuxOnly] 
[Conformance] + test/e2e/network/service.go:2213 + STEP: creating service in namespace services-1408 08/24/23 12:33:30.198 + STEP: creating service affinity-clusterip-transition in namespace services-1408 08/24/23 12:33:30.198 + STEP: creating replication controller affinity-clusterip-transition in namespace services-1408 08/24/23 12:33:30.223 + I0824 12:33:30.244093 14 runners.go:193] Created replication controller with name: affinity-clusterip-transition, namespace: services-1408, replica count: 3 + I0824 12:33:33.306571 14 runners.go:193] affinity-clusterip-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + Aug 24 12:33:33.322: INFO: Creating new exec pod + Aug 24 12:33:33.354: INFO: Waiting up to 5m0s for pod "execpod-affinitykzgxf" in namespace "services-1408" to be "running" + Aug 24 12:33:33.376: INFO: Pod "execpod-affinitykzgxf": Phase="Pending", Reason="", readiness=false. Elapsed: 21.510139ms + Aug 24 12:33:35.386: INFO: Pod "execpod-affinitykzgxf": Phase="Running", Reason="", readiness=true. Elapsed: 2.032392737s + Aug 24 12:33:35.387: INFO: Pod "execpod-affinitykzgxf" satisfied condition "running" + Aug 24 12:33:36.388: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1408 exec execpod-affinitykzgxf -- /bin/sh -x -c nc -v -z -w 2 affinity-clusterip-transition 80' + Aug 24 12:33:36.699: INFO: stderr: "+ nc -v -z -w 2 affinity-clusterip-transition 80\nConnection to affinity-clusterip-transition 80 port [tcp/http] succeeded!\n" + Aug 24 12:33:36.699: INFO: stdout: "" + Aug 24 12:33:36.700: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1408 exec execpod-affinitykzgxf -- /bin/sh -x -c nc -v -z -w 2 10.233.60.140 80' + Aug 24 12:33:36.974: INFO: stderr: "+ nc -v -z -w 2 10.233.60.140 80\nConnection to 10.233.60.140 80 port [tcp/http] succeeded!\n" + Aug 24 12:33:36.974: INFO: stdout: "" + Aug 24 12:33:36.994: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1408 exec execpod-affinitykzgxf -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.60.140:80/ ; done' + Aug 24 12:33:37.460: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n" + Aug 24 12:33:37.460: INFO: stdout: 
"\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-bbjv5\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-bbjv5\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv\naffinity-clusterip-transition-bnflv" + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bbjv5 + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bbjv5 + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv + Aug 24 12:33:37.460: INFO: Received response from host: affinity-clusterip-transition-bnflv + Aug 24 12:33:37.474: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1408 exec execpod-affinitykzgxf -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.233.60.140:80/ ; done' + Aug 24 12:33:37.964: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.233.60.140:80/\n" + Aug 24 12:33:37.964: INFO: stdout: 
"\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9\naffinity-clusterip-transition-srsb9" + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Received response from host: affinity-clusterip-transition-srsb9 + Aug 24 12:33:37.964: INFO: Cleaning up the exec pod + STEP: deleting ReplicationController affinity-clusterip-transition in namespace services-1408, will wait for the garbage collector to delete the pods 08/24/23 12:33:37.986 + Aug 24 12:33:38.056: INFO: Deleting ReplicationController affinity-clusterip-transition took: 12.376733ms + Aug 24 12:33:38.157: INFO: Terminating ReplicationController affinity-clusterip-transition pods took: 101.024964ms + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 16:24:10.119: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] version v1 + Aug 24 12:33:40.501: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] version v1 + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] version v1 + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "proxy-4946" for this suite. 07/29/23 16:24:10.127 + STEP: Destroying namespace "services-1408" for this suite. 
08/24/23 12:33:40.511 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSS +SSSSS ------------------------------ -[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] - should perform rolling updates and roll backs of template modifications [Conformance] - test/e2e/apps/statefulset.go:306 -[BeforeEach] [sig-apps] StatefulSet +[sig-node] Variable Expansion + should allow substituting values in a container's args [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:92 +[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:24:10.148 -Jul 29 16:24:10.148: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename statefulset 07/29/23 16:24:10.152 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:24:10.201 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:24:10.207 -[BeforeEach] [sig-apps] StatefulSet +STEP: Creating a kubernetes client 08/24/23 12:33:40.553 +Aug 24 12:33:40.553: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename var-expansion 08/24/23 12:33:40.556 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:40.592 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:40.599 +[BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 -[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 -STEP: Creating service test in namespace statefulset-2985 07/29/23 16:24:10.213 -[It] should perform rolling updates and roll backs of template modifications [Conformance] - test/e2e/apps/statefulset.go:306 -STEP: Creating a new StatefulSet 07/29/23 16:24:10.223 -Jul 29 16:24:10.242: INFO: Found 0 stateful pods, waiting for 3 -Jul 29 16:24:20.256: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 16:24:20.257: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 16:24:20.257: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 16:24:20.289: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-2985 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 16:24:20.584: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 16:24:20.584: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 16:24:20.584: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -STEP: Updating StatefulSet template: update image from registry.k8s.io/e2e-test-images/httpd:2.4.38-4 to registry.k8s.io/e2e-test-images/httpd:2.4.39-4 07/29/23 16:24:30.655 -Jul 29 16:24:30.690: INFO: Updating stateful set ss2 -STEP: Creating a new revision 07/29/23 16:24:30.69 -STEP: Updating Pods in reverse ordinal order 07/29/23 16:24:40.728 -Jul 29 16:24:40.735: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-2985 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Jul 29 16:24:41.014: INFO: stderr: "+ mv 
-v /tmp/index.html /usr/local/apache2/htdocs/\n" -Jul 29 16:24:41.014: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Jul 29 16:24:41.014: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -STEP: Rolling back to a previous revision 07/29/23 16:24:51.067 -Jul 29 16:24:51.069: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-2985 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 16:24:51.356: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 16:24:51.356: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 16:24:51.356: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Jul 29 16:25:01.421: INFO: Updating stateful set ss2 -STEP: Rolling back update in reverse ordinal order 07/29/23 16:25:11.463 -Jul 29 16:25:11.475: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-2985 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Jul 29 16:25:11.776: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Jul 29 16:25:11.776: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Jul 29 16:25:11.776: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 -Jul 29 16:25:21.837: INFO: Deleting all statefulset in ns statefulset-2985 -Jul 29 16:25:21.843: INFO: Scaling statefulset ss2 to 0 -Jul 29 16:25:31.915: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 16:25:31.923: INFO: Deleting statefulset ss2 -[AfterEach] [sig-apps] StatefulSet +[It] should allow substituting values in a container's args [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:92 +STEP: Creating a pod to test substitution in container's args 08/24/23 12:33:40.605 +Aug 24 12:33:40.619: INFO: Waiting up to 5m0s for pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f" in namespace "var-expansion-8900" to be "Succeeded or Failed" +Aug 24 12:33:40.626: INFO: Pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f": Phase="Pending", Reason="", readiness=false. Elapsed: 6.278822ms +Aug 24 12:33:42.634: INFO: Pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014324623s +Aug 24 12:33:44.634: INFO: Pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014408255s +STEP: Saw pod success 08/24/23 12:33:44.634 +Aug 24 12:33:44.635: INFO: Pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f" satisfied condition "Succeeded or Failed" +Aug 24 12:33:44.640: INFO: Trying to get logs from node pe9deep4seen-3 pod var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f container dapi-container: +STEP: delete the pod 08/24/23 12:33:44.672 +Aug 24 12:33:44.691: INFO: Waiting for pod var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f to disappear +Aug 24 12:33:44.698: INFO: Pod var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f no longer exists +[AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 -Jul 29 16:25:31.960: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] StatefulSet +Aug 24 12:33:44.698: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 -STEP: Destroying namespace "statefulset-2985" for this suite. 07/29/23 16:25:31.97 +STEP: Destroying namespace "var-expansion-8900" for this suite. 08/24/23 12:33:44.713 ------------------------------ -• [SLOW TEST] [81.836 seconds] -[sig-apps] StatefulSet -test/e2e/apps/framework.go:23 - Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:103 - should perform rolling updates and roll backs of template modifications [Conformance] - test/e2e/apps/statefulset.go:306 +• [4.173 seconds] +[sig-node] Variable Expansion +test/e2e/common/node/framework.go:23 + should allow substituting values in a container's args [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:92 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] StatefulSet + [BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:24:10.148 - Jul 29 16:24:10.148: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename statefulset 07/29/23 16:24:10.152 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:24:10.201 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:24:10.207 - [BeforeEach] [sig-apps] StatefulSet + STEP: Creating a kubernetes client 08/24/23 12:33:40.553 + Aug 24 12:33:40.553: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename var-expansion 08/24/23 12:33:40.556 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:40.592 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:40.599 + [BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 - [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 - STEP: Creating service test in namespace statefulset-2985 07/29/23 16:24:10.213 - [It] should perform rolling updates and roll backs of template modifications [Conformance] - test/e2e/apps/statefulset.go:306 - STEP: Creating a new StatefulSet 07/29/23 16:24:10.223 - Jul 29 16:24:10.242: INFO: Found 0 stateful 
pods, waiting for 3 - Jul 29 16:24:20.256: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 16:24:20.257: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 16:24:20.257: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 16:24:20.289: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-2985 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 16:24:20.584: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 16:24:20.584: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 16:24:20.584: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - STEP: Updating StatefulSet template: update image from registry.k8s.io/e2e-test-images/httpd:2.4.38-4 to registry.k8s.io/e2e-test-images/httpd:2.4.39-4 07/29/23 16:24:30.655 - Jul 29 16:24:30.690: INFO: Updating stateful set ss2 - STEP: Creating a new revision 07/29/23 16:24:30.69 - STEP: Updating Pods in reverse ordinal order 07/29/23 16:24:40.728 - Jul 29 16:24:40.735: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-2985 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' - Jul 29 16:24:41.014: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" - Jul 29 16:24:41.014: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" - Jul 29 16:24:41.014: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - - STEP: Rolling back to a previous revision 07/29/23 16:24:51.067 - Jul 29 16:24:51.069: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-2985 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 16:24:51.356: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 16:24:51.356: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 16:24:51.356: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - Jul 29 16:25:01.421: INFO: Updating stateful set ss2 - STEP: Rolling back update in reverse ordinal order 07/29/23 16:25:11.463 - Jul 29 16:25:11.475: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-2985 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' - Jul 29 16:25:11.776: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" - Jul 29 16:25:11.776: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" - Jul 29 16:25:11.776: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - - [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 - Jul 29 16:25:21.837: INFO: Deleting all statefulset in ns statefulset-2985 - Jul 29 16:25:21.843: INFO: Scaling statefulset ss2 to 0 - Jul 29 16:25:31.915: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 
16:25:31.923: INFO: Deleting statefulset ss2 - [AfterEach] [sig-apps] StatefulSet + [It] should allow substituting values in a container's args [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:92 + STEP: Creating a pod to test substitution in container's args 08/24/23 12:33:40.605 + Aug 24 12:33:40.619: INFO: Waiting up to 5m0s for pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f" in namespace "var-expansion-8900" to be "Succeeded or Failed" + Aug 24 12:33:40.626: INFO: Pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f": Phase="Pending", Reason="", readiness=false. Elapsed: 6.278822ms + Aug 24 12:33:42.634: INFO: Pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014324623s + Aug 24 12:33:44.634: INFO: Pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.014408255s + STEP: Saw pod success 08/24/23 12:33:44.634 + Aug 24 12:33:44.635: INFO: Pod "var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f" satisfied condition "Succeeded or Failed" + Aug 24 12:33:44.640: INFO: Trying to get logs from node pe9deep4seen-3 pod var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f container dapi-container: + STEP: delete the pod 08/24/23 12:33:44.672 + Aug 24 12:33:44.691: INFO: Waiting for pod var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f to disappear + Aug 24 12:33:44.698: INFO: Pod var-expansion-a32bdd95-0af5-4977-8323-11e3e46f9e2f no longer exists + [AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 - Jul 29 16:25:31.960: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] StatefulSet + Aug 24 12:33:44.698: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 - STEP: Destroying namespace "statefulset-2985" for this suite. 07/29/23 16:25:31.97 + STEP: Destroying namespace "var-expansion-8900" for this suite. 
08/24/23 12:33:44.713 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSS +S ------------------------------ -[sig-node] Containers - should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:73 -[BeforeEach] [sig-node] Containers +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + listing mutating webhooks should work [Conformance] + test/e2e/apimachinery/webhook.go:656 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:25:31.999 -Jul 29 16:25:32.001: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename containers 07/29/23 16:25:32.006 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:25:32.039 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:25:32.044 -[BeforeEach] [sig-node] Containers +STEP: Creating a kubernetes client 08/24/23 12:33:44.726 +Aug 24 12:33:44.726: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 12:33:44.729 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:44.754 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:44.761 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:73 -STEP: Creating a pod to test override command 07/29/23 16:25:32.051 -Jul 29 16:25:32.066: INFO: Waiting up to 5m0s for pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4" in namespace "containers-2819" to be "Succeeded or Failed" -Jul 29 16:25:32.072: INFO: Pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4": Phase="Pending", Reason="", readiness=false. Elapsed: 5.421063ms -Jul 29 16:25:34.087: INFO: Pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020855413s -Jul 29 16:25:36.080: INFO: Pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.013874862s -STEP: Saw pod success 07/29/23 16:25:36.08 -Jul 29 16:25:36.081: INFO: Pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4" satisfied condition "Succeeded or Failed" -Jul 29 16:25:36.088: INFO: Trying to get logs from node wetuj3nuajog-3 pod client-containers-c3352c8b-607a-463d-9481-36c99f9491c4 container agnhost-container: -STEP: delete the pod 07/29/23 16:25:36.103 -Jul 29 16:25:36.137: INFO: Waiting for pod client-containers-c3352c8b-607a-463d-9481-36c99f9491c4 to disappear -Jul 29 16:25:36.141: INFO: Pod client-containers-c3352c8b-607a-463d-9481-36c99f9491c4 no longer exists -[AfterEach] [sig-node] Containers +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 12:33:44.799 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:33:45.896 +STEP: Deploying the webhook pod 08/24/23 12:33:45.913 +STEP: Wait for the deployment to be ready 08/24/23 12:33:45.935 +Aug 24 12:33:45.950: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +STEP: Deploying the webhook service 08/24/23 12:33:47.971 +STEP: Verifying the service has paired with the endpoint 08/24/23 12:33:47.991 +Aug 24 12:33:48.993: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] listing mutating webhooks should work [Conformance] + test/e2e/apimachinery/webhook.go:656 +STEP: Listing all of the created validation webhooks 08/24/23 12:33:49.117 +STEP: Creating a configMap that should be mutated 08/24/23 12:33:49.148 +STEP: Deleting the collection of validation webhooks 08/24/23 12:33:49.203 +STEP: Creating a configMap that should not be mutated 08/24/23 12:33:49.304 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:25:36.142: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Containers +Aug 24 12:33:49.326: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Containers +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Containers +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "containers-2819" for this suite. 07/29/23 16:25:36.15 +STEP: Destroying namespace "webhook-7320" for this suite. 08/24/23 12:33:49.424 +STEP: Destroying namespace "webhook-7320-markers" for this suite. 
08/24/23 12:33:49.437 ------------------------------ -• [4.168 seconds] -[sig-node] Containers -test/e2e/common/node/framework.go:23 - should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:73 +• [4.721 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + listing mutating webhooks should work [Conformance] + test/e2e/apimachinery/webhook.go:656 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Containers + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:25:31.999 - Jul 29 16:25:32.001: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename containers 07/29/23 16:25:32.006 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:25:32.039 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:25:32.044 - [BeforeEach] [sig-node] Containers + STEP: Creating a kubernetes client 08/24/23 12:33:44.726 + Aug 24 12:33:44.726: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 12:33:44.729 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:44.754 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:44.761 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:73 - STEP: Creating a pod to test override command 07/29/23 16:25:32.051 - Jul 29 16:25:32.066: INFO: Waiting up to 5m0s for pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4" in namespace "containers-2819" to be "Succeeded or Failed" - Jul 29 16:25:32.072: INFO: Pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4": Phase="Pending", Reason="", readiness=false. Elapsed: 5.421063ms - Jul 29 16:25:34.087: INFO: Pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020855413s - Jul 29 16:25:36.080: INFO: Pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.013874862s - STEP: Saw pod success 07/29/23 16:25:36.08 - Jul 29 16:25:36.081: INFO: Pod "client-containers-c3352c8b-607a-463d-9481-36c99f9491c4" satisfied condition "Succeeded or Failed" - Jul 29 16:25:36.088: INFO: Trying to get logs from node wetuj3nuajog-3 pod client-containers-c3352c8b-607a-463d-9481-36c99f9491c4 container agnhost-container: - STEP: delete the pod 07/29/23 16:25:36.103 - Jul 29 16:25:36.137: INFO: Waiting for pod client-containers-c3352c8b-607a-463d-9481-36c99f9491c4 to disappear - Jul 29 16:25:36.141: INFO: Pod client-containers-c3352c8b-607a-463d-9481-36c99f9491c4 no longer exists - [AfterEach] [sig-node] Containers + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 12:33:44.799 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:33:45.896 + STEP: Deploying the webhook pod 08/24/23 12:33:45.913 + STEP: Wait for the deployment to be ready 08/24/23 12:33:45.935 + Aug 24 12:33:45.950: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created + STEP: Deploying the webhook service 08/24/23 12:33:47.971 + STEP: Verifying the service has paired with the endpoint 08/24/23 12:33:47.991 + Aug 24 12:33:48.993: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] listing mutating webhooks should work [Conformance] + test/e2e/apimachinery/webhook.go:656 + STEP: Listing all of the created validation webhooks 08/24/23 12:33:49.117 + STEP: Creating a configMap that should be mutated 08/24/23 12:33:49.148 + STEP: Deleting the collection of validation webhooks 08/24/23 12:33:49.203 + STEP: Creating a configMap that should not be mutated 08/24/23 12:33:49.304 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:25:36.142: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Containers + Aug 24 12:33:49.326: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Containers + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Containers + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "containers-2819" for this suite. 07/29/23 16:25:36.15 + STEP: Destroying namespace "webhook-7320" for this suite. 08/24/23 12:33:49.424 + STEP: Destroying namespace "webhook-7320-markers" for this suite. 
08/24/23 12:33:49.437 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSS +SSSSS ------------------------------ -[sig-node] Containers - should be able to override the image's default command and arguments [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:87 -[BeforeEach] [sig-node] Containers +[sig-storage] EmptyDir volumes + should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:167 +[BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:25:36.173 -Jul 29 16:25:36.173: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename containers 07/29/23 16:25:36.175 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:25:36.213 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:25:36.218 -[BeforeEach] [sig-node] Containers +STEP: Creating a kubernetes client 08/24/23 12:33:49.46 +Aug 24 12:33:49.460: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 12:33:49.464 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:49.492 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:49.499 +[BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:87 -STEP: Creating a pod to test override all 07/29/23 16:25:36.222 -Jul 29 16:25:36.237: INFO: Waiting up to 5m0s for pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f" in namespace "containers-285" to be "Succeeded or Failed" -Jul 29 16:25:36.243: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f": Phase="Pending", Reason="", readiness=false. Elapsed: 5.899017ms -Jul 29 16:25:38.256: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019070064s -Jul 29 16:25:40.255: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f": Phase="Pending", Reason="", readiness=false. Elapsed: 4.017363235s -Jul 29 16:25:42.253: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.015458813s -STEP: Saw pod success 07/29/23 16:25:42.253 -Jul 29 16:25:42.254: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f" satisfied condition "Succeeded or Failed" -Jul 29 16:25:42.261: INFO: Trying to get logs from node wetuj3nuajog-3 pod client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f container agnhost-container: -STEP: delete the pod 07/29/23 16:25:42.279 -Jul 29 16:25:42.311: INFO: Waiting for pod client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f to disappear -Jul 29 16:25:42.319: INFO: Pod client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f no longer exists -[AfterEach] [sig-node] Containers +[It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:167 +STEP: Creating a pod to test emptydir 0644 on node default medium 08/24/23 12:33:49.507 +Aug 24 12:33:49.522: INFO: Waiting up to 5m0s for pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b" in namespace "emptydir-2055" to be "Succeeded or Failed" +Aug 24 12:33:49.526: INFO: Pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b": Phase="Pending", Reason="", readiness=false. Elapsed: 4.425296ms +Aug 24 12:33:51.535: INFO: Pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013135886s +Aug 24 12:33:53.535: INFO: Pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012679947s +STEP: Saw pod success 08/24/23 12:33:53.535 +Aug 24 12:33:53.536: INFO: Pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b" satisfied condition "Succeeded or Failed" +Aug 24 12:33:53.543: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b container test-container: +STEP: delete the pod 08/24/23 12:33:53.555 +Aug 24 12:33:53.575: INFO: Waiting for pod pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b to disappear +Aug 24 12:33:53.581: INFO: Pod pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b no longer exists +[AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 16:25:42.320: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Containers +Aug 24 12:33:53.582: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Containers +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Containers +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "containers-285" for this suite. 07/29/23 16:25:42.328 +STEP: Destroying namespace "emptydir-2055" for this suite. 
08/24/23 12:33:53.594 ------------------------------ -• [SLOW TEST] [6.169 seconds] -[sig-node] Containers -test/e2e/common/node/framework.go:23 - should be able to override the image's default command and arguments [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:87 +• [4.144 seconds] +[sig-storage] EmptyDir volumes +test/e2e/common/storage/framework.go:23 + should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:167 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Containers + [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:25:36.173 - Jul 29 16:25:36.173: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename containers 07/29/23 16:25:36.175 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:25:36.213 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:25:36.218 - [BeforeEach] [sig-node] Containers + STEP: Creating a kubernetes client 08/24/23 12:33:49.46 + Aug 24 12:33:49.460: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 12:33:49.464 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:49.492 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:49.499 + [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:87 - STEP: Creating a pod to test override all 07/29/23 16:25:36.222 - Jul 29 16:25:36.237: INFO: Waiting up to 5m0s for pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f" in namespace "containers-285" to be "Succeeded or Failed" - Jul 29 16:25:36.243: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f": Phase="Pending", Reason="", readiness=false. Elapsed: 5.899017ms - Jul 29 16:25:38.256: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019070064s - Jul 29 16:25:40.255: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f": Phase="Pending", Reason="", readiness=false. Elapsed: 4.017363235s - Jul 29 16:25:42.253: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.015458813s - STEP: Saw pod success 07/29/23 16:25:42.253 - Jul 29 16:25:42.254: INFO: Pod "client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f" satisfied condition "Succeeded or Failed" - Jul 29 16:25:42.261: INFO: Trying to get logs from node wetuj3nuajog-3 pod client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f container agnhost-container: - STEP: delete the pod 07/29/23 16:25:42.279 - Jul 29 16:25:42.311: INFO: Waiting for pod client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f to disappear - Jul 29 16:25:42.319: INFO: Pod client-containers-469c3c32-49cf-4346-8122-9eb61c4a3f9f no longer exists - [AfterEach] [sig-node] Containers + [It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:167 + STEP: Creating a pod to test emptydir 0644 on node default medium 08/24/23 12:33:49.507 + Aug 24 12:33:49.522: INFO: Waiting up to 5m0s for pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b" in namespace "emptydir-2055" to be "Succeeded or Failed" + Aug 24 12:33:49.526: INFO: Pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b": Phase="Pending", Reason="", readiness=false. Elapsed: 4.425296ms + Aug 24 12:33:51.535: INFO: Pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013135886s + Aug 24 12:33:53.535: INFO: Pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012679947s + STEP: Saw pod success 08/24/23 12:33:53.535 + Aug 24 12:33:53.536: INFO: Pod "pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b" satisfied condition "Succeeded or Failed" + Aug 24 12:33:53.543: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b container test-container: + STEP: delete the pod 08/24/23 12:33:53.555 + Aug 24 12:33:53.575: INFO: Waiting for pod pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b to disappear + Aug 24 12:33:53.581: INFO: Pod pod-17d91193-563b-45f7-9cfc-bfa4ca5f5e4b no longer exists + [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 16:25:42.320: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Containers + Aug 24 12:33:53.582: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Containers + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Containers + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "containers-285" for this suite. 07/29/23 16:25:42.328 + STEP: Destroying namespace "emptydir-2055" for this suite. 
08/24/23 12:33:53.594 << End Captured GinkgoWriter Output ------------------------------ -S +SS ------------------------------ -[sig-storage] Downward API volume - should provide container's memory limit [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:207 -[BeforeEach] [sig-storage] Downward API volume +[sig-node] ConfigMap + should be consumable via environment variable [NodeConformance] [Conformance] + test/e2e/common/node/configmap.go:45 +[BeforeEach] [sig-node] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:25:42.344 -Jul 29 16:25:42.344: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:25:42.35 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:25:42.382 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:25:42.387 -[BeforeEach] [sig-storage] Downward API volume +STEP: Creating a kubernetes client 08/24/23 12:33:53.609 +Aug 24 12:33:53.609: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 12:33:53.613 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:53.643 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:53.648 +[BeforeEach] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 -[It] should provide container's memory limit [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:207 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:25:42.393 -Jul 29 16:25:42.411: INFO: Waiting up to 5m0s for pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a" in namespace "downward-api-5700" to be "Succeeded or Failed" -Jul 29 16:25:42.421: INFO: Pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a": Phase="Pending", Reason="", readiness=false. Elapsed: 9.690549ms -Jul 29 16:25:44.432: INFO: Pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020430649s -Jul 29 16:25:46.430: INFO: Pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018464805s -STEP: Saw pod success 07/29/23 16:25:46.43 -Jul 29 16:25:46.430: INFO: Pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a" satisfied condition "Succeeded or Failed" -Jul 29 16:25:46.436: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a container client-container: -STEP: delete the pod 07/29/23 16:25:46.45 -Jul 29 16:25:46.476: INFO: Waiting for pod downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a to disappear -Jul 29 16:25:46.482: INFO: Pod downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a no longer exists -[AfterEach] [sig-storage] Downward API volume +[It] should be consumable via environment variable [NodeConformance] [Conformance] + test/e2e/common/node/configmap.go:45 +STEP: Creating configMap configmap-1289/configmap-test-6dc1437d-e477-4a57-bd89-458ca7400e8a 08/24/23 12:33:53.653 +STEP: Creating a pod to test consume configMaps 08/24/23 12:33:53.66 +Aug 24 12:33:53.674: INFO: Waiting up to 5m0s for pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8" in namespace "configmap-1289" to be "Succeeded or Failed" +Aug 24 12:33:53.681: INFO: Pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8": Phase="Pending", Reason="", readiness=false. Elapsed: 6.840054ms +Aug 24 12:33:55.692: INFO: Pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017881268s +Aug 24 12:33:57.688: INFO: Pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013516739s +STEP: Saw pod success 08/24/23 12:33:57.688 +Aug 24 12:33:57.688: INFO: Pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8" satisfied condition "Succeeded or Failed" +Aug 24 12:33:57.697: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8 container env-test: +STEP: delete the pod 08/24/23 12:33:57.711 +Aug 24 12:33:57.729: INFO: Waiting for pod pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8 to disappear +Aug 24 12:33:57.735: INFO: Pod pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8 no longer exists +[AfterEach] [sig-node] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:25:46.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume +Aug 24 12:33:57.735: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-node] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-node] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-5700" for this suite. 07/29/23 16:25:46.493 +STEP: Destroying namespace "configmap-1289" for this suite. 
08/24/23 12:33:57.744 ------------------------------ -• [4.163 seconds] -[sig-storage] Downward API volume -test/e2e/common/storage/framework.go:23 - should provide container's memory limit [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:207 +• [4.147 seconds] +[sig-node] ConfigMap +test/e2e/common/node/framework.go:23 + should be consumable via environment variable [NodeConformance] [Conformance] + test/e2e/common/node/configmap.go:45 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume + [BeforeEach] [sig-node] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:25:42.344 - Jul 29 16:25:42.344: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:25:42.35 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:25:42.382 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:25:42.387 - [BeforeEach] [sig-storage] Downward API volume + STEP: Creating a kubernetes client 08/24/23 12:33:53.609 + Aug 24 12:33:53.609: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 12:33:53.613 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:53.643 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:53.648 + [BeforeEach] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 - [It] should provide container's memory limit [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:207 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:25:42.393 - Jul 29 16:25:42.411: INFO: Waiting up to 5m0s for pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a" in namespace "downward-api-5700" to be "Succeeded or Failed" - Jul 29 16:25:42.421: INFO: Pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a": Phase="Pending", Reason="", readiness=false. Elapsed: 9.690549ms - Jul 29 16:25:44.432: INFO: Pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020430649s - Jul 29 16:25:46.430: INFO: Pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018464805s - STEP: Saw pod success 07/29/23 16:25:46.43 - Jul 29 16:25:46.430: INFO: Pod "downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a" satisfied condition "Succeeded or Failed" - Jul 29 16:25:46.436: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a container client-container: - STEP: delete the pod 07/29/23 16:25:46.45 - Jul 29 16:25:46.476: INFO: Waiting for pod downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a to disappear - Jul 29 16:25:46.482: INFO: Pod downwardapi-volume-187cb7b3-1067-485a-8b24-8639ab743b0a no longer exists - [AfterEach] [sig-storage] Downward API volume + [It] should be consumable via environment variable [NodeConformance] [Conformance] + test/e2e/common/node/configmap.go:45 + STEP: Creating configMap configmap-1289/configmap-test-6dc1437d-e477-4a57-bd89-458ca7400e8a 08/24/23 12:33:53.653 + STEP: Creating a pod to test consume configMaps 08/24/23 12:33:53.66 + Aug 24 12:33:53.674: INFO: Waiting up to 5m0s for pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8" in namespace "configmap-1289" to be "Succeeded or Failed" + Aug 24 12:33:53.681: INFO: Pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8": Phase="Pending", Reason="", readiness=false. Elapsed: 6.840054ms + Aug 24 12:33:55.692: INFO: Pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017881268s + Aug 24 12:33:57.688: INFO: Pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013516739s + STEP: Saw pod success 08/24/23 12:33:57.688 + Aug 24 12:33:57.688: INFO: Pod "pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8" satisfied condition "Succeeded or Failed" + Aug 24 12:33:57.697: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8 container env-test: + STEP: delete the pod 08/24/23 12:33:57.711 + Aug 24 12:33:57.729: INFO: Waiting for pod pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8 to disappear + Aug 24 12:33:57.735: INFO: Pod pod-configmaps-0968bb4f-390b-43bb-805d-6d32368bb8e8 no longer exists + [AfterEach] [sig-node] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:25:46.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume + Aug 24 12:33:57.735: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-node] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-node] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-5700" for this suite. 07/29/23 16:25:46.493 + STEP: Destroying namespace "configmap-1289" for this suite. 
08/24/23 12:33:57.744 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Subpath Atomic writer volumes - should support subpaths with projected pod [Conformance] - test/e2e/storage/subpath.go:106 -[BeforeEach] [sig-storage] Subpath +[sig-storage] Secrets + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:68 +[BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:25:46.507 -Jul 29 16:25:46.507: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename subpath 07/29/23 16:25:46.51 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:25:46.547 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:25:46.552 -[BeforeEach] [sig-storage] Subpath +STEP: Creating a kubernetes client 08/24/23 12:33:57.771 +Aug 24 12:33:57.771: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 12:33:57.773 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:57.802 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:57.805 +[BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 -STEP: Setting up data 07/29/23 16:25:46.558 -[It] should support subpaths with projected pod [Conformance] - test/e2e/storage/subpath.go:106 -STEP: Creating pod pod-subpath-test-projected-69vg 07/29/23 16:25:46.579 -STEP: Creating a pod to test atomic-volume-subpath 07/29/23 16:25:46.579 -Jul 29 16:25:46.597: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-69vg" in namespace "subpath-2867" to be "Succeeded or Failed" -Jul 29 16:25:46.603: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Pending", Reason="", readiness=false. Elapsed: 5.43595ms -Jul 29 16:25:48.614: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 2.016136202s -Jul 29 16:25:50.611: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 4.013732675s -Jul 29 16:25:52.615: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 6.017364744s -Jul 29 16:25:54.612: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 8.014084624s -Jul 29 16:25:56.611: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 10.013673157s -Jul 29 16:25:58.614: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 12.016602485s -Jul 29 16:26:00.615: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 14.01694384s -Jul 29 16:26:02.613: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 16.015368244s -Jul 29 16:26:04.614: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 18.016230289s -Jul 29 16:26:06.610: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. 
Elapsed: 20.012766175s -Jul 29 16:26:08.612: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=false. Elapsed: 22.014604133s -Jul 29 16:26:10.615: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.017354561s -STEP: Saw pod success 07/29/23 16:26:10.615 -Jul 29 16:26:10.616: INFO: Pod "pod-subpath-test-projected-69vg" satisfied condition "Succeeded or Failed" -Jul 29 16:26:10.623: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-projected-69vg container test-container-subpath-projected-69vg: -STEP: delete the pod 07/29/23 16:26:10.646 -Jul 29 16:26:10.672: INFO: Waiting for pod pod-subpath-test-projected-69vg to disappear -Jul 29 16:26:10.679: INFO: Pod pod-subpath-test-projected-69vg no longer exists -STEP: Deleting pod pod-subpath-test-projected-69vg 07/29/23 16:26:10.679 -Jul 29 16:26:10.679: INFO: Deleting pod "pod-subpath-test-projected-69vg" in namespace "subpath-2867" -[AfterEach] [sig-storage] Subpath +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:68 +STEP: Creating secret with name secret-test-c72672b6-e67d-4a2a-9043-66f32eb370ba 08/24/23 12:33:57.811 +STEP: Creating a pod to test consume secrets 08/24/23 12:33:57.82 +Aug 24 12:33:57.836: INFO: Waiting up to 5m0s for pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f" in namespace "secrets-8130" to be "Succeeded or Failed" +Aug 24 12:33:57.846: INFO: Pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f": Phase="Pending", Reason="", readiness=false. Elapsed: 9.705174ms +Aug 24 12:33:59.852: INFO: Pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016038036s +Aug 24 12:34:01.851: INFO: Pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015319542s +STEP: Saw pod success 08/24/23 12:34:01.852 +Aug 24 12:34:01.852: INFO: Pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f" satisfied condition "Succeeded or Failed" +Aug 24 12:34:01.858: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f container secret-volume-test: +STEP: delete the pod 08/24/23 12:34:01.871 +Aug 24 12:34:01.889: INFO: Waiting for pod pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f to disappear +Aug 24 12:34:01.894: INFO: Pod pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f no longer exists +[AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 16:26:10.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Subpath +Aug 24 12:34:01.894: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "subpath-2867" for this suite. 07/29/23 16:26:10.693 +STEP: Destroying namespace "secrets-8130" for this suite. 
08/24/23 12:34:01.902 ------------------------------ -• [SLOW TEST] [24.208 seconds] -[sig-storage] Subpath -test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - test/e2e/storage/subpath.go:36 - should support subpaths with projected pod [Conformance] - test/e2e/storage/subpath.go:106 +• [4.142 seconds] +[sig-storage] Secrets +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:68 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Subpath + [BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:25:46.507 - Jul 29 16:25:46.507: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename subpath 07/29/23 16:25:46.51 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:25:46.547 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:25:46.552 - [BeforeEach] [sig-storage] Subpath + STEP: Creating a kubernetes client 08/24/23 12:33:57.771 + Aug 24 12:33:57.771: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 12:33:57.773 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:33:57.802 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:33:57.805 + [BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 - STEP: Setting up data 07/29/23 16:25:46.558 - [It] should support subpaths with projected pod [Conformance] - test/e2e/storage/subpath.go:106 - STEP: Creating pod pod-subpath-test-projected-69vg 07/29/23 16:25:46.579 - STEP: Creating a pod to test atomic-volume-subpath 07/29/23 16:25:46.579 - Jul 29 16:25:46.597: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-69vg" in namespace "subpath-2867" to be "Succeeded or Failed" - Jul 29 16:25:46.603: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Pending", Reason="", readiness=false. Elapsed: 5.43595ms - Jul 29 16:25:48.614: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 2.016136202s - Jul 29 16:25:50.611: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 4.013732675s - Jul 29 16:25:52.615: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 6.017364744s - Jul 29 16:25:54.612: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 8.014084624s - Jul 29 16:25:56.611: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 10.013673157s - Jul 29 16:25:58.614: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 12.016602485s - Jul 29 16:26:00.615: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 14.01694384s - Jul 29 16:26:02.613: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 16.015368244s - Jul 29 16:26:04.614: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. 
Elapsed: 18.016230289s - Jul 29 16:26:06.610: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=true. Elapsed: 20.012766175s - Jul 29 16:26:08.612: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Running", Reason="", readiness=false. Elapsed: 22.014604133s - Jul 29 16:26:10.615: INFO: Pod "pod-subpath-test-projected-69vg": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.017354561s - STEP: Saw pod success 07/29/23 16:26:10.615 - Jul 29 16:26:10.616: INFO: Pod "pod-subpath-test-projected-69vg" satisfied condition "Succeeded or Failed" - Jul 29 16:26:10.623: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-projected-69vg container test-container-subpath-projected-69vg: - STEP: delete the pod 07/29/23 16:26:10.646 - Jul 29 16:26:10.672: INFO: Waiting for pod pod-subpath-test-projected-69vg to disappear - Jul 29 16:26:10.679: INFO: Pod pod-subpath-test-projected-69vg no longer exists - STEP: Deleting pod pod-subpath-test-projected-69vg 07/29/23 16:26:10.679 - Jul 29 16:26:10.679: INFO: Deleting pod "pod-subpath-test-projected-69vg" in namespace "subpath-2867" - [AfterEach] [sig-storage] Subpath + [It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:68 + STEP: Creating secret with name secret-test-c72672b6-e67d-4a2a-9043-66f32eb370ba 08/24/23 12:33:57.811 + STEP: Creating a pod to test consume secrets 08/24/23 12:33:57.82 + Aug 24 12:33:57.836: INFO: Waiting up to 5m0s for pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f" in namespace "secrets-8130" to be "Succeeded or Failed" + Aug 24 12:33:57.846: INFO: Pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f": Phase="Pending", Reason="", readiness=false. Elapsed: 9.705174ms + Aug 24 12:33:59.852: INFO: Pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016038036s + Aug 24 12:34:01.851: INFO: Pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015319542s + STEP: Saw pod success 08/24/23 12:34:01.852 + Aug 24 12:34:01.852: INFO: Pod "pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f" satisfied condition "Succeeded or Failed" + Aug 24 12:34:01.858: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f container secret-volume-test: + STEP: delete the pod 08/24/23 12:34:01.871 + Aug 24 12:34:01.889: INFO: Waiting for pod pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f to disappear + Aug 24 12:34:01.894: INFO: Pod pod-secrets-048bb86b-93b2-4407-88b6-d28af02d2b9f no longer exists + [AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 16:26:10.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Subpath + Aug 24 12:34:01.894: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "subpath-2867" for this suite. 07/29/23 16:26:10.693 + STEP: Destroying namespace "secrets-8130" for this suite. 
08/24/23 12:34:01.902 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for multiple CRDs of same group but different versions [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:309 + works for CRD preserving unknown fields in an embedded object [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:236 [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:26:10.721 -Jul 29 16:26:10.721: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:26:10.723 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:26:10.76 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:26:10.766 +STEP: Creating a kubernetes client 08/24/23 12:34:01.925 +Aug 24 12:34:01.925: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:34:01.927 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:34:01.962 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:34:01.968 [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] works for multiple CRDs of same group but different versions [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:309 -STEP: CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation 07/29/23 16:26:10.771 -Jul 29 16:26:10.772: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation 07/29/23 16:26:20.816 -Jul 29 16:26:20.818: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:26:23.916: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 +[It] works for CRD preserving unknown fields in an embedded object [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:236 +Aug 24 12:34:01.975: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 08/24/23 12:34:04.657 +Aug 24 12:34:04.658: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 --namespace=crd-publish-openapi-8763 create -f -' +Aug 24 12:34:06.100: INFO: stderr: "" +Aug 24 12:34:06.100: INFO: stdout: "e2e-test-crd-publish-openapi-4804-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" +Aug 24 12:34:06.100: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 --namespace=crd-publish-openapi-8763 delete e2e-test-crd-publish-openapi-4804-crds test-cr' +Aug 24 12:34:06.260: INFO: stderr: "" +Aug 24 12:34:06.260: INFO: stdout: "e2e-test-crd-publish-openapi-4804-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" +Aug 24 12:34:06.260: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 --namespace=crd-publish-openapi-8763 apply -f -' +Aug 24 12:34:06.668: INFO: stderr: "" +Aug 24 12:34:06.668: INFO: 
stdout: "e2e-test-crd-publish-openapi-4804-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" +Aug 24 12:34:06.669: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 --namespace=crd-publish-openapi-8763 delete e2e-test-crd-publish-openapi-4804-crds test-cr' +Aug 24 12:34:06.816: INFO: stderr: "" +Aug 24 12:34:06.816: INFO: stdout: "e2e-test-crd-publish-openapi-4804-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" +STEP: kubectl explain works to explain CR 08/24/23 12:34:06.816 +Aug 24 12:34:06.816: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 explain e2e-test-crd-publish-openapi-4804-crds' +Aug 24 12:34:08.183: INFO: stderr: "" +Aug 24 12:34:08.183: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4804-crd\nVERSION: crd-publish-openapi-test-unknown-in-nested.example.com/v1\n\nDESCRIPTION:\n preserve-unknown-properties in nested field for Testing\n\nFIELDS:\n apiVersion\t\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<>\n Specification of Waldo\n\n status\t\n Status of Waldo\n\n" [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:26:33.657: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:34:10.665: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "crd-publish-openapi-4599" for this suite. 07/29/23 16:26:33.678 +STEP: Destroying namespace "crd-publish-openapi-8763" for this suite. 
08/24/23 12:34:10.685 ------------------------------ -• [SLOW TEST] [22.971 seconds] +• [SLOW TEST] [8.771 seconds] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/apimachinery/framework.go:23 - works for multiple CRDs of same group but different versions [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:309 + works for CRD preserving unknown fields in an embedded object [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:236 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:26:10.721 - Jul 29 16:26:10.721: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:26:10.723 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:26:10.76 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:26:10.766 + STEP: Creating a kubernetes client 08/24/23 12:34:01.925 + Aug 24 12:34:01.925: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:34:01.927 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:34:01.962 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:34:01.968 [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] works for multiple CRDs of same group but different versions [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:309 - STEP: CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation 07/29/23 16:26:10.771 - Jul 29 16:26:10.772: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation 07/29/23 16:26:20.816 - Jul 29 16:26:20.818: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:26:23.916: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 + [It] works for CRD preserving unknown fields in an embedded object [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:236 + Aug 24 12:34:01.975: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 08/24/23 12:34:04.657 + Aug 24 12:34:04.658: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 --namespace=crd-publish-openapi-8763 create -f -' + Aug 24 12:34:06.100: INFO: stderr: "" + Aug 24 12:34:06.100: INFO: stdout: "e2e-test-crd-publish-openapi-4804-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" + Aug 24 12:34:06.100: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 --namespace=crd-publish-openapi-8763 delete e2e-test-crd-publish-openapi-4804-crds test-cr' + Aug 24 12:34:06.260: INFO: stderr: "" + Aug 24 12:34:06.260: INFO: stdout: "e2e-test-crd-publish-openapi-4804-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" + Aug 24 12:34:06.260: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 --namespace=crd-publish-openapi-8763 apply -f -' + Aug 24 12:34:06.668: INFO: stderr: "" 
+ Aug 24 12:34:06.668: INFO: stdout: "e2e-test-crd-publish-openapi-4804-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" + Aug 24 12:34:06.669: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 --namespace=crd-publish-openapi-8763 delete e2e-test-crd-publish-openapi-4804-crds test-cr' + Aug 24 12:34:06.816: INFO: stderr: "" + Aug 24 12:34:06.816: INFO: stdout: "e2e-test-crd-publish-openapi-4804-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" + STEP: kubectl explain works to explain CR 08/24/23 12:34:06.816 + Aug 24 12:34:06.816: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8763 explain e2e-test-crd-publish-openapi-4804-crds' + Aug 24 12:34:08.183: INFO: stderr: "" + Aug 24 12:34:08.183: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4804-crd\nVERSION: crd-publish-openapi-test-unknown-in-nested.example.com/v1\n\nDESCRIPTION:\n preserve-unknown-properties in nested field for Testing\n\nFIELDS:\n apiVersion\t<string>\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t<string>\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t<Object>\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<>\n Specification of Waldo\n\n status\t\n Status of Waldo\n\n" [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:26:33.657: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:34:10.665: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "crd-publish-openapi-4599" for this suite. 07/29/23 16:26:33.678 + STEP: Destroying namespace "crd-publish-openapi-8763" for this suite. 
08/24/23 12:34:10.685 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] test/e2e/scheduling/predicates.go:704 [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:26:33.701 -Jul 29 16:26:33.702: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sched-pred 07/29/23 16:26:33.704 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:26:33.736 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:26:33.741 +STEP: Creating a kubernetes client 08/24/23 12:34:10.71 +Aug 24 12:34:10.711: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename sched-pred 08/24/23 12:34:10.715 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:34:10.746 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:34:10.75 [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/scheduling/predicates.go:97 -Jul 29 16:26:33.745: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready -Jul 29 16:26:33.762: INFO: Waiting for terminating namespaces to be deleted... -Jul 29 16:26:33.769: INFO: -Logging pods the apiserver thinks is on node wetuj3nuajog-1 before test -Jul 29 16:26:33.799: INFO: cilium-cdv47 from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.799: INFO: Container cilium-agent ready: true, restart count 0 -Jul 29 16:26:33.800: INFO: cilium-node-init-jdrzm from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.800: INFO: Container node-init ready: true, restart count 0 -Jul 29 16:26:33.800: INFO: coredns-787d4945fb-2xpvx from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.800: INFO: Container coredns ready: true, restart count 0 -Jul 29 16:26:33.800: INFO: coredns-787d4945fb-clg7z from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.800: INFO: Container coredns ready: true, restart count 0 -Jul 29 16:26:33.800: INFO: kube-addon-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.801: INFO: Container kube-addon-manager ready: true, restart count 0 -Jul 29 16:26:33.801: INFO: kube-apiserver-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.801: INFO: Container kube-apiserver ready: true, restart count 0 -Jul 29 16:26:33.801: INFO: kube-controller-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.801: INFO: Container kube-controller-manager ready: true, restart count 0 -Jul 29 16:26:33.801: INFO: kube-proxy-zc9m8 from kube-system started at 2023-07-29 15:13:58 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.801: INFO: Container kube-proxy ready: 
true, restart count 0 -Jul 29 16:26:33.801: INFO: kube-scheduler-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.801: INFO: Container kube-scheduler ready: true, restart count 0 -Jul 29 16:26:33.802: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:26:33.802: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:26:33.802: INFO: Container systemd-logs ready: true, restart count 0 -Jul 29 16:26:33.802: INFO: -Logging pods the apiserver thinks is on node wetuj3nuajog-2 before test -Jul 29 16:26:33.830: INFO: cilium-kxphw from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.830: INFO: Container cilium-agent ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: cilium-node-init-fqx5t from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.830: INFO: Container node-init ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: cilium-operator-8c499d9f6-hfgjd from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.830: INFO: Container cilium-operator ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: kube-addon-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.830: INFO: Container kube-addon-manager ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: kube-apiserver-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.830: INFO: Container kube-apiserver ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: kube-controller-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.830: INFO: Container kube-controller-manager ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: kube-proxy-gzqkk from kube-system started at 2023-07-29 15:14:12 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.830: INFO: Container kube-proxy ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: kube-scheduler-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.830: INFO: Container kube-scheduler ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:26:33.830: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: Container systemd-logs ready: true, restart count 0 -Jul 29 16:26:33.830: INFO: -Logging pods the apiserver thinks is on node wetuj3nuajog-3 before test -Jul 29 16:26:33.851: INFO: cilium-node-init-9ghzk from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.851: INFO: Container node-init ready: true, restart count 0 -Jul 29 16:26:33.851: INFO: cilium-v9c5p from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.851: INFO: Container cilium-agent ready: true, restart count 0 -Jul 29 16:26:33.851: INFO: kube-proxy-v77tx from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.851: INFO: Container 
kube-proxy ready: true, restart count 0 -Jul 29 16:26:33.851: INFO: sonobuoy from sonobuoy started at 2023-07-29 15:28:59 +0000 UTC (1 container statuses recorded) -Jul 29 16:26:33.851: INFO: Container kube-sonobuoy ready: true, restart count 0 -Jul 29 16:26:33.852: INFO: sonobuoy-e2e-job-7bf00df102b6496e from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:26:33.852: INFO: Container e2e ready: true, restart count 0 -Jul 29 16:26:33.852: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:26:33.852: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded) -Jul 29 16:26:33.852: INFO: Container sonobuoy-worker ready: true, restart count 0 -Jul 29 16:26:33.852: INFO: Container systemd-logs ready: true, restart count 0 +Aug 24 12:34:10.755: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Aug 24 12:34:10.770: INFO: Waiting for terminating namespaces to be deleted... +Aug 24 12:34:10.777: INFO: +Logging pods the apiserver thinks is on node pe9deep4seen-1 before test +Aug 24 12:34:10.788: INFO: cilium-node-init-wqpdx from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.789: INFO: Container node-init ready: true, restart count 0 +Aug 24 12:34:10.789: INFO: cilium-wpzgb from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.789: INFO: Container cilium-agent ready: true, restart count 0 +Aug 24 12:34:10.789: INFO: coredns-787d4945fb-8jnm5 from kube-system started at 2023-08-24 11:24:04 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.789: INFO: Container coredns ready: true, restart count 0 +Aug 24 12:34:10.789: INFO: coredns-787d4945fb-d76z6 from kube-system started at 2023-08-24 11:24:07 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.790: INFO: Container coredns ready: true, restart count 0 +Aug 24 12:34:10.790: INFO: kube-addon-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.790: INFO: Container kube-addon-manager ready: true, restart count 0 +Aug 24 12:34:10.790: INFO: kube-apiserver-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.790: INFO: Container kube-apiserver ready: true, restart count 0 +Aug 24 12:34:10.790: INFO: kube-controller-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.790: INFO: Container kube-controller-manager ready: true, restart count 0 +Aug 24 12:34:10.791: INFO: kube-proxy-nr5bs from kube-system started at 2023-08-24 11:21:24 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.791: INFO: Container kube-proxy ready: true, restart count 0 +Aug 24 12:34:10.791: INFO: kube-scheduler-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.791: INFO: Container kube-scheduler ready: true, restart count 0 +Aug 24 12:34:10.791: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) +Aug 24 12:34:10.791: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 12:34:10.791: INFO: Container systemd-logs ready: true, restart count 0 +Aug 24 12:34:10.792: INFO: +Logging pods the 
apiserver thinks is on node pe9deep4seen-2 before test +Aug 24 12:34:10.809: INFO: cilium-node-init-95cbk from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.809: INFO: Container node-init ready: true, restart count 0 +Aug 24 12:34:10.809: INFO: cilium-operator-75f7897945-8qqz2 from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.809: INFO: Container cilium-operator ready: true, restart count 0 +Aug 24 12:34:10.809: INFO: cilium-rcknz from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.809: INFO: Container cilium-agent ready: true, restart count 0 +Aug 24 12:34:10.809: INFO: kube-addon-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:37 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.809: INFO: Container kube-addon-manager ready: true, restart count 0 +Aug 24 12:34:10.809: INFO: kube-apiserver-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.809: INFO: Container kube-apiserver ready: true, restart count 0 +Aug 24 12:34:10.809: INFO: kube-controller-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.809: INFO: Container kube-controller-manager ready: true, restart count 0 +Aug 24 12:34:10.809: INFO: kube-proxy-lm2dm from kube-system started at 2023-08-24 11:22:03 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.809: INFO: Container kube-proxy ready: true, restart count 0 +Aug 24 12:34:10.809: INFO: kube-scheduler-pe9deep4seen-2 from kube-system started at 2023-08-24 11:25:19 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.810: INFO: Container kube-scheduler ready: true, restart count 0 +Aug 24 12:34:10.810: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) +Aug 24 12:34:10.810: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 12:34:10.810: INFO: Container systemd-logs ready: true, restart count 0 +Aug 24 12:34:10.810: INFO: +Logging pods the apiserver thinks is on node pe9deep4seen-3 before test +Aug 24 12:34:10.825: INFO: cilium-node-init-pdcw9 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.825: INFO: Container node-init ready: true, restart count 0 +Aug 24 12:34:10.825: INFO: cilium-xgc44 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.825: INFO: Container cilium-agent ready: true, restart count 0 +Aug 24 12:34:10.825: INFO: kube-proxy-8vv8d from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.825: INFO: Container kube-proxy ready: true, restart count 0 +Aug 24 12:34:10.825: INFO: sonobuoy from sonobuoy started at 2023-08-24 11:38:19 +0000 UTC (1 container statuses recorded) +Aug 24 12:34:10.825: INFO: Container kube-sonobuoy ready: true, restart count 0 +Aug 24 12:34:10.825: INFO: sonobuoy-e2e-job-b3f52dde3e8a4a4e from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) +Aug 24 12:34:10.825: INFO: Container e2e ready: true, restart count 0 +Aug 24 12:34:10.825: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 12:34:10.825: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 from 
sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) +Aug 24 12:34:10.825: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 12:34:10.825: INFO: Container systemd-logs ready: true, restart count 0 [It] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] test/e2e/scheduling/predicates.go:704 -STEP: Trying to launch a pod without a label to get a node which can launch it. 07/29/23 16:26:33.852 -Jul 29 16:26:33.871: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-pred-6768" to be "running" -Jul 29 16:26:33.877: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 6.136891ms -Jul 29 16:26:35.918: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.04783491s -Jul 29 16:26:35.919: INFO: Pod "without-label" satisfied condition "running" -STEP: Explicitly delete pod here to free the resource it takes. 07/29/23 16:26:35.93 -STEP: Trying to apply a random label on the found node. 07/29/23 16:26:35.97 -STEP: verifying the node has the label kubernetes.io/e2e-04e648e6-ec38-46c3-9518-31743e399ea9 95 07/29/23 16:26:35.989 -STEP: Trying to create a pod(pod4) with hostport 54322 and hostIP 0.0.0.0(empty string here) and expect scheduled 07/29/23 16:26:36.002 -Jul 29 16:26:36.013: INFO: Waiting up to 5m0s for pod "pod4" in namespace "sched-pred-6768" to be "not pending" -Jul 29 16:26:36.019: INFO: Pod "pod4": Phase="Pending", Reason="", readiness=false. Elapsed: 5.896311ms -Jul 29 16:26:38.028: INFO: Pod "pod4": Phase="Running", Reason="", readiness=true. Elapsed: 2.015241511s -Jul 29 16:26:38.028: INFO: Pod "pod4" satisfied condition "not pending" -STEP: Trying to create another pod(pod5) with hostport 54322 but hostIP 192.168.121.141 on the node which pod4 resides and expect not scheduled 07/29/23 16:26:38.029 -Jul 29 16:26:38.039: INFO: Waiting up to 5m0s for pod "pod5" in namespace "sched-pred-6768" to be "not pending" -Jul 29 16:26:38.048: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 8.699222ms -Jul 29 16:26:40.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019445414s -Jul 29 16:26:42.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4.017921086s -Jul 29 16:26:44.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.018059714s -Jul 29 16:26:46.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 8.016811615s -Jul 29 16:26:48.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 10.016006082s -Jul 29 16:26:50.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 12.016955857s -Jul 29 16:26:52.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 14.018361824s -Jul 29 16:26:54.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 16.01786188s -Jul 29 16:26:56.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 18.017402232s -Jul 29 16:26:58.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 20.017195229s -Jul 29 16:27:00.062: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 22.02291167s -Jul 29 16:27:02.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 24.019068934s -Jul 29 16:27:04.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 26.016399641s -Jul 29 16:27:06.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 28.019988991s -Jul 29 16:27:08.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 30.020595558s -Jul 29 16:27:10.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 32.018770096s -Jul 29 16:27:12.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 34.020221748s -Jul 29 16:27:14.062: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 36.02254201s -Jul 29 16:27:16.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 38.015215185s -Jul 29 16:27:18.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 40.017414509s -Jul 29 16:27:20.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 42.022392065s -Jul 29 16:27:22.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 44.017036833s -Jul 29 16:27:24.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 46.018638197s -Jul 29 16:27:26.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 48.016987515s -Jul 29 16:27:28.065: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 50.0260014s -Jul 29 16:27:30.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 52.019162775s -Jul 29 16:27:32.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 54.017616883s -Jul 29 16:27:34.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 56.019203506s -Jul 29 16:27:36.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 58.018324142s -Jul 29 16:27:38.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.017714553s -Jul 29 16:27:40.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.01754003s -Jul 29 16:27:42.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.016740801s -Jul 29 16:27:44.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.01652739s -Jul 29 16:27:46.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.018162882s -Jul 29 16:27:48.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.017826334s -Jul 29 16:27:50.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.015897946s -Jul 29 16:27:52.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.018339353s -Jul 29 16:27:54.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.018262311s -Jul 29 16:27:56.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.017697288s -Jul 29 16:27:58.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.016190473s -Jul 29 16:28:00.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.01683044s -Jul 29 16:28:02.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.016913339s -Jul 29 16:28:04.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.016538584s -Jul 29 16:28:06.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.017577299s -Jul 29 16:28:08.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m30.018357569s -Jul 29 16:28:10.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.015475285s -Jul 29 16:28:12.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.017893168s -Jul 29 16:28:14.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.016970102s -Jul 29 16:28:16.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.019184869s -Jul 29 16:28:18.062: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.023155475s -Jul 29 16:28:20.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.020780354s -Jul 29 16:28:22.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.016204953s -Jul 29 16:28:24.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.021987506s -Jul 29 16:28:26.063: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.023845676s -Jul 29 16:28:28.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.020585739s -Jul 29 16:28:30.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m52.020345244s -Jul 29 16:28:32.069: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.029597264s -Jul 29 16:28:34.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.017793185s -Jul 29 16:28:36.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.01865733s -Jul 29 16:28:38.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.017876317s -Jul 29 16:28:40.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m2.016831507s -Jul 29 16:28:42.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m4.018916195s -Jul 29 16:28:44.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m6.019668907s -Jul 29 16:28:46.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m8.016880628s -Jul 29 16:28:48.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m10.016701545s -Jul 29 16:28:50.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m12.020222677s -Jul 29 16:28:52.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m14.019934405s -Jul 29 16:28:54.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m16.018250102s -Jul 29 16:28:56.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m18.019005116s -Jul 29 16:28:58.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m20.015938105s -Jul 29 16:29:00.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m22.020603473s -Jul 29 16:29:02.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m24.016492031s -Jul 29 16:29:04.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m26.01879245s -Jul 29 16:29:06.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m28.020494636s -Jul 29 16:29:08.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m30.017897496s -Jul 29 16:29:10.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m32.020656378s -Jul 29 16:29:12.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m34.020381707s -Jul 29 16:29:14.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m36.017060459s -Jul 29 16:29:16.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m38.017544965s -Jul 29 16:29:18.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m40.018441318s -Jul 29 16:29:20.064: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m42.02474963s -Jul 29 16:29:22.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m44.018968355s -Jul 29 16:29:24.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m46.020305707s -Jul 29 16:29:26.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m48.019014234s -Jul 29 16:29:28.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m50.018629377s -Jul 29 16:29:30.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m52.018698644s -Jul 29 16:29:32.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m54.020978666s -Jul 29 16:29:34.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m56.018463931s -Jul 29 16:29:36.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m58.019909292s -Jul 29 16:29:38.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m0.016720963s -Jul 29 16:29:40.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m2.01599087s -Jul 29 16:29:42.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m4.01992827s -Jul 29 16:29:44.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m6.016267313s -Jul 29 16:29:46.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m8.018041522s -Jul 29 16:29:48.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m10.017798851s -Jul 29 16:29:50.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m12.018780142s -Jul 29 16:29:52.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m14.018868666s -Jul 29 16:29:54.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m16.019065506s -Jul 29 16:29:56.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m18.015455724s -Jul 29 16:29:58.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m20.018806624s -Jul 29 16:30:00.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m22.01598351s -Jul 29 16:30:02.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m24.018921351s -Jul 29 16:30:04.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m26.016816972s -Jul 29 16:30:06.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m28.017704419s -Jul 29 16:30:08.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m30.016367366s -Jul 29 16:30:10.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m32.020556639s -Jul 29 16:30:12.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m34.02061631s -Jul 29 16:30:14.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m36.019025981s -Jul 29 16:30:16.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m38.016427108s -Jul 29 16:30:18.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m40.019837326s -Jul 29 16:30:20.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m42.021544774s -Jul 29 16:30:22.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m44.022248328s -Jul 29 16:30:24.064: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m46.025094839s -Jul 29 16:30:26.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m48.018479605s -Jul 29 16:30:28.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m50.020044379s -Jul 29 16:30:30.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m52.019405446s -Jul 29 16:30:32.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m54.018565725s -Jul 29 16:30:34.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m56.019732227s -Jul 29 16:30:36.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m58.019028695s -Jul 29 16:30:38.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m0.017015851s -Jul 29 16:30:40.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m2.014747442s -Jul 29 16:30:42.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m4.019671476s -Jul 29 16:30:44.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m6.021043148s -Jul 29 16:30:46.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m8.017299242s -Jul 29 16:30:48.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m10.017353817s -Jul 29 16:30:50.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m12.01859539s -Jul 29 16:30:52.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m14.018767959s -Jul 29 16:30:54.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m16.019877366s -Jul 29 16:30:56.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m18.016244457s -Jul 29 16:30:58.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m20.01707616s -Jul 29 16:31:00.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m22.01747715s -Jul 29 16:31:02.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m24.016297599s -Jul 29 16:31:04.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m26.01577876s -Jul 29 16:31:06.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m28.018764734s -Jul 29 16:31:08.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m30.018251442s -Jul 29 16:31:10.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m32.01702909s -Jul 29 16:31:12.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m34.017682018s -Jul 29 16:31:14.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m36.019126564s -Jul 29 16:31:16.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m38.018689949s -Jul 29 16:31:18.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m40.015392494s -Jul 29 16:31:20.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m42.017313596s -Jul 29 16:31:22.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m44.019252055s -Jul 29 16:31:24.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m46.017150812s -Jul 29 16:31:26.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m48.01863926s -Jul 29 16:31:28.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m50.018872034s -Jul 29 16:31:30.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m52.01932638s -Jul 29 16:31:32.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m54.021485729s -Jul 29 16:31:34.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m56.018486925s -Jul 29 16:31:36.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m58.018961863s -Jul 29 16:31:38.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.022082154s -Jul 29 16:31:38.067: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.027916483s -STEP: removing the label kubernetes.io/e2e-04e648e6-ec38-46c3-9518-31743e399ea9 off the node wetuj3nuajog-3 07/29/23 16:31:38.067 -STEP: verifying the node doesn't have the label kubernetes.io/e2e-04e648e6-ec38-46c3-9518-31743e399ea9 07/29/23 16:31:38.109 +STEP: Trying to launch a pod without a label to get a node which can launch it. 08/24/23 12:34:10.826 +Aug 24 12:34:10.840: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-pred-3134" to be "running" +Aug 24 12:34:10.856: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 16.315186ms +Aug 24 12:34:12.864: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.023854803s +Aug 24 12:34:12.864: INFO: Pod "without-label" satisfied condition "running" +STEP: Explicitly delete pod here to free the resource it takes. 08/24/23 12:34:12.869 +STEP: Trying to apply a random label on the found node. 08/24/23 12:34:12.896 +STEP: verifying the node has the label kubernetes.io/e2e-8ef54f1b-edb3-4554-9dfa-c5fd4f2f3046 95 08/24/23 12:34:12.919 +STEP: Trying to create a pod(pod4) with hostport 54322 and hostIP 0.0.0.0(empty string here) and expect scheduled 08/24/23 12:34:12.928 +Aug 24 12:34:12.951: INFO: Waiting up to 5m0s for pod "pod4" in namespace "sched-pred-3134" to be "not pending" +Aug 24 12:34:12.972: INFO: Pod "pod4": Phase="Pending", Reason="", readiness=false. Elapsed: 19.160199ms +Aug 24 12:34:14.978: INFO: Pod "pod4": Phase="Running", Reason="", readiness=true. Elapsed: 2.025701785s +Aug 24 12:34:14.978: INFO: Pod "pod4" satisfied condition "not pending" +STEP: Trying to create another pod(pod5) with hostport 54322 but hostIP 192.168.121.130 on the node which pod4 resides and expect not scheduled 08/24/23 12:34:14.978 +Aug 24 12:34:14.991: INFO: Waiting up to 5m0s for pod "pod5" in namespace "sched-pred-3134" to be "not pending" +Aug 24 12:34:14.997: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.397528ms +Aug 24 12:34:17.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016187065s +Aug 24 12:34:19.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4.016568496s +Aug 24 12:34:21.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.013403989s +Aug 24 12:34:23.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.013528132s +Aug 24 12:34:25.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 10.012705109s +Aug 24 12:34:27.035: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 12.044146317s +Aug 24 12:34:29.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 14.013334662s +Aug 24 12:34:31.034: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 16.042684124s +Aug 24 12:34:33.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 18.016794325s +Aug 24 12:34:35.012: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 20.020865805s +Aug 24 12:34:37.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 22.013676929s +Aug 24 12:34:39.011: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 24.019658769s +Aug 24 12:34:41.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 26.012651478s +Aug 24 12:34:43.042: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 28.051525807s +Aug 24 12:34:45.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 30.013439678s +Aug 24 12:34:47.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 32.0139782s +Aug 24 12:34:49.003: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 34.012498397s +Aug 24 12:34:51.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 36.015442515s +Aug 24 12:34:53.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 38.013344845s +Aug 24 12:34:55.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 40.013093367s +Aug 24 12:34:57.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 42.012613544s +Aug 24 12:34:59.042: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 44.051069512s +Aug 24 12:35:01.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 46.012773398s +Aug 24 12:35:03.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 48.017858851s +Aug 24 12:35:05.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 50.015108405s +Aug 24 12:35:07.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 52.014003968s +Aug 24 12:35:09.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 54.013086127s +Aug 24 12:35:11.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 56.015330838s +Aug 24 12:35:13.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 58.017919634s +Aug 24 12:35:15.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.014412179s +Aug 24 12:35:17.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.014244027s +Aug 24 12:35:19.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.017111816s +Aug 24 12:35:21.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.016786902s +Aug 24 12:35:23.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.01406122s +Aug 24 12:35:25.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.015425437s +Aug 24 12:35:27.003: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m12.012307705s +Aug 24 12:35:29.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.01346553s +Aug 24 12:35:31.011: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.02050172s +Aug 24 12:35:33.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.016939847s +Aug 24 12:35:35.012: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.021296425s +Aug 24 12:35:37.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.017135965s +Aug 24 12:35:39.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.012871283s +Aug 24 12:35:41.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.01456168s +Aug 24 12:35:43.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.014558129s +Aug 24 12:35:45.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.014685166s +Aug 24 12:35:47.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.016356525s +Aug 24 12:35:49.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.01429223s +Aug 24 12:35:51.011: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.02038706s +Aug 24 12:35:53.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.015931002s +Aug 24 12:35:55.051: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.060477899s +Aug 24 12:35:57.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.013418063s +Aug 24 12:35:59.035: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.044322s +Aug 24 12:36:01.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.015218911s +Aug 24 12:36:03.012: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.020676964s +Aug 24 12:36:05.003: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.01245747s +Aug 24 12:36:07.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m52.015057905s +Aug 24 12:36:09.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.013011806s +Aug 24 12:36:11.011: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.020167171s +Aug 24 12:36:13.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.015253157s +Aug 24 12:36:15.010: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.019478707s +Aug 24 12:36:17.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m2.014282576s +Aug 24 12:36:19.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m4.013106138s +Aug 24 12:36:21.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m6.013897819s +Aug 24 12:36:23.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m8.016215212s +Aug 24 12:36:25.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m10.013073119s +Aug 24 12:36:27.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m12.012559931s +Aug 24 12:36:29.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m14.015658704s +Aug 24 12:36:31.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2m16.017172631s +Aug 24 12:36:33.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m18.017891698s +Aug 24 12:36:35.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m20.01378755s +Aug 24 12:36:37.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m22.014264199s +Aug 24 12:36:39.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m24.013571669s +Aug 24 12:36:41.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m26.013553699s +Aug 24 12:36:43.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m28.01384054s +Aug 24 12:36:45.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m30.014759394s +Aug 24 12:36:47.037: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m32.046019365s +Aug 24 12:36:49.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m34.015881022s +Aug 24 12:36:51.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m36.063493696s +Aug 24 12:36:53.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m38.017693968s +Aug 24 12:36:55.015: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m40.023890743s +Aug 24 12:36:57.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m42.01432793s +Aug 24 12:36:59.045: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m44.054074881s +Aug 24 12:37:01.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m46.012937132s +Aug 24 12:37:03.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m48.013433547s +Aug 24 12:37:05.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m50.01407981s +Aug 24 12:37:07.039: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m52.048368795s +Aug 24 12:37:09.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m54.013297032s +Aug 24 12:37:11.032: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m56.041500755s +Aug 24 12:37:13.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m58.016303414s +Aug 24 12:37:15.031: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m0.040316818s +Aug 24 12:37:17.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m2.01329445s +Aug 24 12:37:19.042: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m4.050683114s +Aug 24 12:37:21.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m6.012712154s +Aug 24 12:37:23.043: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m8.051893617s +Aug 24 12:37:25.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m10.014539184s +Aug 24 12:37:27.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m12.014006634s +Aug 24 12:37:29.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m14.014447159s +Aug 24 12:37:31.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m16.014168908s +Aug 24 12:37:33.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m18.013435872s +Aug 24 12:37:35.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 3m20.01477666s +Aug 24 12:37:37.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m22.015192492s +Aug 24 12:37:39.014: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m24.023168514s +Aug 24 12:37:41.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m26.015026379s +Aug 24 12:37:43.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m28.014131972s +Aug 24 12:37:45.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m30.01333294s +Aug 24 12:37:47.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m32.014482923s +Aug 24 12:37:49.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m34.01609057s +Aug 24 12:37:51.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m36.016148694s +Aug 24 12:37:53.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m38.015743263s +Aug 24 12:37:55.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m40.013744124s +Aug 24 12:37:57.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m42.01301103s +Aug 24 12:37:59.003: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m44.012241039s +Aug 24 12:38:01.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m46.015568152s +Aug 24 12:38:03.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m48.017184119s +Aug 24 12:38:05.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m50.013847198s +Aug 24 12:38:07.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m52.012842924s +Aug 24 12:38:09.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m54.014022261s +Aug 24 12:38:11.047: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m56.05563053s +Aug 24 12:38:13.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m58.015623238s +Aug 24 12:38:15.034: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m0.04293272s +Aug 24 12:38:17.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m2.015209321s +Aug 24 12:38:19.038: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m4.046986422s +Aug 24 12:38:21.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m6.013864658s +Aug 24 12:38:23.041: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m8.050525387s +Aug 24 12:38:25.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m10.013378497s +Aug 24 12:38:27.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m12.014328822s +Aug 24 12:38:29.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m14.013918314s +Aug 24 12:38:31.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m16.016417539s +Aug 24 12:38:33.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m18.015220934s +Aug 24 12:38:35.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m20.015668812s +Aug 24 12:38:37.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m22.014192325s +Aug 24 12:38:39.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4m24.014114335s +Aug 24 12:38:41.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m26.013525295s +Aug 24 12:38:43.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m28.014314154s +Aug 24 12:38:45.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m30.013017388s +Aug 24 12:38:47.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m32.013251854s +Aug 24 12:38:49.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m34.016014154s +Aug 24 12:38:51.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m36.013488897s +Aug 24 12:38:53.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m38.013395367s +Aug 24 12:38:55.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m40.015416814s +Aug 24 12:38:57.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m42.013945027s +Aug 24 12:38:59.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m44.018114515s +Aug 24 12:39:01.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m46.016068089s +Aug 24 12:39:03.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m48.018169905s +Aug 24 12:39:05.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m50.014910916s +Aug 24 12:39:07.010: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m52.019050851s +Aug 24 12:39:09.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m54.015182266s +Aug 24 12:39:11.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m56.015478526s +Aug 24 12:39:13.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m58.014822246s +Aug 24 12:39:15.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.012763705s +Aug 24 12:39:15.010: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.018665829s +STEP: removing the label kubernetes.io/e2e-8ef54f1b-edb3-4554-9dfa-c5fd4f2f3046 off the node pe9deep4seen-3 08/24/23 12:39:15.01 +STEP: verifying the node doesn't have the label kubernetes.io/e2e-8ef54f1b-edb3-4554-9dfa-c5fd4f2f3046 08/24/23 12:39:15.058 [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:31:38.118: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:39:15.069: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/scheduling/predicates.go:88 [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] @@ -19589,5005 +18503,5255 @@ Jul 29 16:31:38.118: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "sched-pred-6768" for this suite. 07/29/23 16:31:38.129 +STEP: Destroying namespace "sched-pred-3134" for this suite. 
 ------------------------------
-• [SLOW TEST] [304.443 seconds]
+• [SLOW TEST] [304.408 seconds]
 [sig-scheduling] SchedulerPredicates [Serial]
 test/e2e/scheduling/framework.go:40
   validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]
   test/e2e/scheduling/predicates.go:704
   Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+ [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+ set up framework | framework.go:178
+ STEP: Creating a kubernetes client 08/24/23 12:34:10.71
+ Aug 24 12:34:10.711: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename sched-pred 08/24/23 12:34:10.715
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:34:10.746
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:34:10.75
+ [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+ test/e2e/framework/metrics/init/init.go:31
+ [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+ test/e2e/scheduling/predicates.go:97
+ Aug 24 12:34:10.755: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+ Aug 24 12:34:10.770: INFO: Waiting for terminating namespaces to be deleted...
+ Aug 24 12:34:10.777: INFO:
+ Logging pods the apiserver thinks is on node pe9deep4seen-1 before test
+ Aug 24 12:34:10.788: INFO: cilium-node-init-wqpdx from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.789: INFO: Container node-init ready: true, restart count 0
+ Aug 24 12:34:10.789: INFO: cilium-wpzgb from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.789: INFO: Container cilium-agent ready: true, restart count 0
+ Aug 24 12:34:10.789: INFO: coredns-787d4945fb-8jnm5 from kube-system started at 2023-08-24 11:24:04 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.789: INFO: Container coredns ready: true, restart count 0
+ Aug 24 12:34:10.789: INFO: coredns-787d4945fb-d76z6 from kube-system started at 2023-08-24 11:24:07 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.790: INFO: Container coredns ready: true, restart count 0
+ Aug 24 12:34:10.790: INFO: kube-addon-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.790: INFO: Container kube-addon-manager ready: true, restart count 0
+ Aug 24 12:34:10.790: INFO: kube-apiserver-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.790: INFO: Container kube-apiserver ready: true, restart count 0
+ Aug 24 12:34:10.790: INFO: kube-controller-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.790: INFO: Container kube-controller-manager ready: true, restart count 0
+ Aug 24 12:34:10.791: INFO: kube-proxy-nr5bs from kube-system started at 2023-08-24 11:21:24 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.791: INFO: Container kube-proxy ready: true, restart count 0
+ Aug 24 12:34:10.791: INFO: kube-scheduler-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.791: INFO: Container kube-scheduler ready: true, restart count 0
+ Aug 24 12:34:10.791: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded)
+ Aug 24 12:34:10.791: INFO: Container sonobuoy-worker ready: true, restart count 0
+ Aug 24 12:34:10.791: INFO: Container systemd-logs ready: true, restart count 0
+ Aug 24 12:34:10.792: INFO:
+ Logging pods the apiserver thinks is on node pe9deep4seen-2 before test
+ Aug 24 12:34:10.809: INFO: cilium-node-init-95cbk from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.809: INFO: Container node-init ready: true, restart count 0
+ Aug 24 12:34:10.809: INFO: cilium-operator-75f7897945-8qqz2 from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.809: INFO: Container cilium-operator ready: true, restart count 0
+ Aug 24 12:34:10.809: INFO: cilium-rcknz from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.809: INFO: Container cilium-agent ready: true, restart count 0
+ Aug 24 12:34:10.809: INFO: kube-addon-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:37 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.809: INFO: Container kube-addon-manager ready: true, restart count 0
+ Aug 24 12:34:10.809: INFO: kube-apiserver-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.809: INFO: Container kube-apiserver ready: true, restart count 0
+ Aug 24 12:34:10.809: INFO: kube-controller-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.809: INFO: Container kube-controller-manager ready: true, restart count 0
+ Aug 24 12:34:10.809: INFO: kube-proxy-lm2dm from kube-system started at 2023-08-24 11:22:03 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.809: INFO: Container kube-proxy ready: true, restart count 0
+ Aug 24 12:34:10.809: INFO: kube-scheduler-pe9deep4seen-2 from kube-system started at 2023-08-24 11:25:19 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.810: INFO: Container kube-scheduler ready: true, restart count 0
+ Aug 24 12:34:10.810: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded)
+ Aug 24 12:34:10.810: INFO: Container sonobuoy-worker ready: true, restart count 0
+ Aug 24 12:34:10.810: INFO: Container systemd-logs ready: true, restart count 0
+ Aug 24 12:34:10.810: INFO:
+ Logging pods the apiserver thinks is on node pe9deep4seen-3 before test
+ Aug 24 12:34:10.825: INFO: cilium-node-init-pdcw9 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.825: INFO: Container node-init ready: true, restart count 0
+ Aug 24 12:34:10.825: INFO: cilium-xgc44 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.825: INFO: Container cilium-agent ready: true, restart count 0
+ Aug 24 12:34:10.825: INFO: kube-proxy-8vv8d from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.825: INFO: Container kube-proxy ready: true, restart count 0
+ Aug 24 12:34:10.825: INFO: sonobuoy from sonobuoy started at 2023-08-24 11:38:19 +0000 UTC (1 container statuses recorded)
+ Aug 24 12:34:10.825: INFO: Container kube-sonobuoy ready: true, restart count 0
+ Aug 24 12:34:10.825: INFO: sonobuoy-e2e-job-b3f52dde3e8a4a4e from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded)
+ Aug 24 12:34:10.825: INFO: Container e2e ready: true, restart count 0
+ Aug 24 12:34:10.825: INFO: Container sonobuoy-worker ready: true, restart count 0
+ Aug 24 12:34:10.825: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded)
+ Aug 24 12:34:10.825: INFO: Container sonobuoy-worker ready: true, restart count 0
+ Aug 24 12:34:10.825: INFO: Container systemd-logs ready: true, restart count 0
+ [It] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]
+ test/e2e/scheduling/predicates.go:704
+ STEP: Trying to launch a pod without a label to get a node which can launch it. 08/24/23 12:34:10.826
+ Aug 24 12:34:10.840: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-pred-3134" to be "running"
+ Aug 24 12:34:10.856: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 16.315186ms
+ Aug 24 12:34:12.864: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.023854803s
+ Aug 24 12:34:12.864: INFO: Pod "without-label" satisfied condition "running"
+ STEP: Explicitly delete pod here to free the resource it takes. 08/24/23 12:34:12.869
+ STEP: Trying to apply a random label on the found node. 08/24/23 12:34:12.896
+ STEP: verifying the node has the label kubernetes.io/e2e-8ef54f1b-edb3-4554-9dfa-c5fd4f2f3046 95 08/24/23 12:34:12.919
+ STEP: Trying to create a pod(pod4) with hostport 54322 and hostIP 0.0.0.0(empty string here) and expect scheduled 08/24/23 12:34:12.928
+ Aug 24 12:34:12.951: INFO: Waiting up to 5m0s for pod "pod4" in namespace "sched-pred-3134" to be "not pending"
+ Aug 24 12:34:12.972: INFO: Pod "pod4": Phase="Pending", Reason="", readiness=false. Elapsed: 19.160199ms
+ Aug 24 12:34:14.978: INFO: Pod "pod4": Phase="Running", Reason="", readiness=true. Elapsed: 2.025701785s
+ Aug 24 12:34:14.978: INFO: Pod "pod4" satisfied condition "not pending"
+ STEP: Trying to create another pod(pod5) with hostport 54322 but hostIP 192.168.121.130 on the node which pod4 resides and expect not scheduled 08/24/23 12:34:14.978
+ Aug 24 12:34:14.991: INFO: Waiting up to 5m0s for pod "pod5" in namespace "sched-pred-3134" to be "not pending"
+ Aug 24 12:34:14.997: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.397528ms
+ Aug 24 12:34:17.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016187065s
+ Aug 24 12:34:19.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4.016568496s
+ Aug 24 12:34:21.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.013403989s
+ Aug 24 12:34:23.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 8.013528132s
+ Aug 24 12:34:25.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 10.012705109s
+ Aug 24 12:34:27.035: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 12.044146317s
+ Aug 24 12:34:29.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 14.013334662s
+ Aug 24 12:34:31.034: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 16.042684124s
+ Aug 24 12:34:33.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 18.016794325s
+ Aug 24 12:34:35.012: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 20.020865805s
+ Aug 24 12:34:37.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 22.013676929s
+ Aug 24 12:34:39.011: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 24.019658769s
+ Aug 24 12:34:41.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 26.012651478s
+ Aug 24 12:34:43.042: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 28.051525807s
+ Aug 24 12:34:45.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 30.013439678s
+ Aug 24 12:34:47.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 32.0139782s
+ Aug 24 12:34:49.003: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 34.012498397s
+ Aug 24 12:34:51.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 36.015442515s
+ Aug 24 12:34:53.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 38.013344845s
+ Aug 24 12:34:55.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 40.013093367s
+ Aug 24 12:34:57.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 42.012613544s
+ Aug 24 12:34:59.042: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 44.051069512s
+ Aug 24 12:35:01.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 46.012773398s
+ Aug 24 12:35:03.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 48.017858851s
+ Aug 24 12:35:05.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 50.015108405s
+ Aug 24 12:35:07.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 52.014003968s
+ Aug 24 12:35:09.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 54.013086127s
+ Aug 24 12:35:11.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 56.015330838s
+ Aug 24 12:35:13.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 58.017919634s
+ Aug 24 12:35:15.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.014412179s
+ Aug 24 12:35:17.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.014244027s
+ Aug 24 12:35:19.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.017111816s
+ Aug 24 12:35:21.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.016786902s
+ Aug 24 12:35:23.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.01406122s
+ Aug 24 12:35:25.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.015425437s
+ Aug 24 12:35:27.003: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.012307705s
+ Aug 24 12:35:29.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.01346553s
+ Aug 24 12:35:31.011: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.02050172s
+ Aug 24 12:35:33.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.016939847s
+ Aug 24 12:35:35.012: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.021296425s
+ Aug 24 12:35:37.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.017135965s
+ Aug 24 12:35:39.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.012871283s
+ Aug 24 12:35:41.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.01456168s
+ Aug 24 12:35:43.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.014558129s
+ Aug 24 12:35:45.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.014685166s
+ Aug 24 12:35:47.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.016356525s
+ Aug 24 12:35:49.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.01429223s
+ Aug 24 12:35:51.011: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.02038706s
+ Aug 24 12:35:53.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.015931002s
+ Aug 24 12:35:55.051: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.060477899s
+ Aug 24 12:35:57.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.013418063s
+ Aug 24 12:35:59.035: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.044322s
+ Aug 24 12:36:01.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.015218911s
+ Aug 24 12:36:03.012: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.020676964s
+ Aug 24 12:36:05.003: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.01245747s
+ Aug 24 12:36:07.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m52.015057905s
+ Aug 24 12:36:09.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.013011806s
+ Aug 24 12:36:11.011: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.020167171s
+ Aug 24 12:36:13.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.015253157s
+ Aug 24 12:36:15.010: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.019478707s
+ Aug 24 12:36:17.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m2.014282576s
+ Aug 24 12:36:19.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m4.013106138s
+ Aug 24 12:36:21.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m6.013897819s
+ Aug 24 12:36:23.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m8.016215212s
+ Aug 24 12:36:25.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m10.013073119s
+ Aug 24 12:36:27.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m12.012559931s
+ Aug 24 12:36:29.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m14.015658704s
+ Aug 24 12:36:31.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m16.017172631s
+ Aug 24 12:36:33.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m18.017891698s
+ Aug 24 12:36:35.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m20.01378755s
+ Aug 24 12:36:37.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m22.014264199s
+ Aug 24 12:36:39.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m24.013571669s
+ Aug 24 12:36:41.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m26.013553699s
+ Aug 24 12:36:43.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m28.01384054s
+ Aug 24 12:36:45.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m30.014759394s
+ Aug 24 12:36:47.037: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m32.046019365s
+ Aug 24 12:36:49.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m34.015881022s
+ Aug 24 12:36:51.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m36.063493696s
+ Aug 24 12:36:53.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m38.017693968s
+ Aug 24 12:36:55.015: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m40.023890743s
+ Aug 24 12:36:57.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m42.01432793s
+ Aug 24 12:36:59.045: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m44.054074881s
+ Aug 24 12:37:01.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m46.012937132s
+ Aug 24 12:37:03.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m48.013433547s
+ Aug 24 12:37:05.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m50.01407981s
+ Aug 24 12:37:07.039: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m52.048368795s
+ Aug 24 12:37:09.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m54.013297032s
+ Aug 24 12:37:11.032: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m56.041500755s
+ Aug 24 12:37:13.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m58.016303414s
+ Aug 24 12:37:15.031: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m0.040316818s
+ Aug 24 12:37:17.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m2.01329445s
+ Aug 24 12:37:19.042: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m4.050683114s
+ Aug 24 12:37:21.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m6.012712154s
+ Aug 24 12:37:23.043: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m8.051893617s
+ Aug 24 12:37:25.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m10.014539184s
+ Aug 24 12:37:27.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m12.014006634s
+ Aug 24 12:37:29.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m14.014447159s
+ Aug 24 12:37:31.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m16.014168908s
+ Aug 24 12:37:33.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m18.013435872s
+ Aug 24 12:37:35.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m20.01477666s
+ Aug 24 12:37:37.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m22.015192492s
+ Aug 24 12:37:39.014: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m24.023168514s
+ Aug 24 12:37:41.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m26.015026379s
+ Aug 24 12:37:43.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m28.014131972s
+ Aug 24 12:37:45.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m30.01333294s
+ Aug 24 12:37:47.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m32.014482923s
+ Aug 24 12:37:49.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m34.01609057s
+ Aug 24 12:37:51.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m36.016148694s
+ Aug 24 12:37:53.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m38.015743263s
+ Aug 24 12:37:55.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m40.013744124s
+ Aug 24 12:37:57.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m42.01301103s
+ Aug 24 12:37:59.003: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m44.012241039s
+ Aug 24 12:38:01.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m46.015568152s
+ Aug 24 12:38:03.008: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m48.017184119s
+ Aug 24 12:38:05.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m50.013847198s
+ Aug 24 12:38:07.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m52.012842924s
+ Aug 24 12:38:09.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m54.014022261s
+ Aug 24 12:38:11.047: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m56.05563053s
+ Aug 24 12:38:13.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m58.015623238s
+ Aug 24 12:38:15.034: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m0.04293272s
+ Aug 24 12:38:17.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m2.015209321s
+ Aug 24 12:38:19.038: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m4.046986422s
+ Aug 24 12:38:21.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m6.013864658s
+ Aug 24 12:38:23.041: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m8.050525387s
+ Aug 24 12:38:25.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m10.013378497s
+ Aug 24 12:38:27.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m12.014328822s
+ Aug 24 12:38:29.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m14.013918314s
+ Aug 24 12:38:31.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m16.016417539s
+ Aug 24 12:38:33.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m18.015220934s
+ Aug 24 12:38:35.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m20.015668812s
+ Aug 24 12:38:37.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m22.014192325s
+ Aug 24 12:38:39.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m24.014114335s
+ Aug 24 12:38:41.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m26.013525295s
+ Aug 24 12:38:43.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m28.014314154s
+ Aug 24 12:38:45.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m30.013017388s
+ Aug 24 12:38:47.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m32.013251854s
+ Aug 24 12:38:49.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m34.016014154s
+ Aug 24 12:38:51.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m36.013488897s
+ Aug 24 12:38:53.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m38.013395367s
+ Aug 24 12:38:55.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m40.015416814s
+ Aug 24 12:38:57.005: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m42.013945027s
+ Aug 24 12:38:59.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m44.018114515s
+ Aug 24 12:39:01.007: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m46.016068089s
+ Aug 24 12:39:03.009: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m48.018169905s
+ Aug 24 12:39:05.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m50.014910916s
+ Aug 24 12:39:07.010: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m52.019050851s
+ Aug 24 12:39:09.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m54.015182266s
+ Aug 24 12:39:11.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m56.015478526s
+ Aug 24 12:39:13.006: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m58.014822246s
+ Aug 24 12:39:15.004: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.012763705s
+ Aug 24 12:39:15.010: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.018665829s
+ STEP: removing the label kubernetes.io/e2e-8ef54f1b-edb3-4554-9dfa-c5fd4f2f3046 off the node pe9deep4seen-3 08/24/23 12:39:15.01
+ STEP: verifying the node doesn't have the label kubernetes.io/e2e-8ef54f1b-edb3-4554-9dfa-c5fd4f2f3046 08/24/23 12:39:15.058
+ [AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+ test/e2e/framework/node/init/init.go:32
+ Aug 24 12:39:15.069: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+ test/e2e/scheduling/predicates.go:88
+ [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+ test/e2e/framework/metrics/init/init.go:33
+ [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+ dump namespaces | framework.go:196
+ [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+ tear down framework | framework.go:193
+ STEP: Destroying namespace "sched-pred-3134" for this suite. 08/24/23 12:39:15.09
+ << End Captured GinkgoWriter Output
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-node] Container Runtime blackbox test on terminated container
+ should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]
+ test/e2e/common/node/runtime.go:195
+[BeforeEach] [sig-node] Container Runtime
+ set up framework | framework.go:178
+STEP: Creating a kubernetes client 08/24/23 12:39:15.127
+Aug 24 12:39:15.128: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename container-runtime 08/24/23 12:39:15.131
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:15.162
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:15.168
+[BeforeEach] [sig-node] Container Runtime
+ test/e2e/framework/metrics/init/init.go:31
+[It] should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]
+ test/e2e/common/node/runtime.go:195
+STEP: create the container 08/24/23 12:39:15.176
+STEP: wait for the container to reach Succeeded 08/24/23 12:39:15.202
+STEP: get the container status 08/24/23 12:39:19.255
+STEP: the container should be terminated 08/24/23 12:39:19.261
+STEP: the termination message should be set 08/24/23 12:39:19.261
+Aug 24 12:39:19.262: INFO: Expected: &{DONE} to match Container's Termination Message: DONE --
+STEP: delete the container 08/24/23 12:39:19.262
+[AfterEach] [sig-node] Container Runtime
+ test/e2e/framework/node/init/init.go:32
+Aug 24 12:39:19.295: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-node] Container Runtime
+ test/e2e/framework/metrics/init/init.go:33
+[DeferCleanup (Each)] [sig-node] Container Runtime
+ dump namespaces | framework.go:196
+[DeferCleanup (Each)] [sig-node] Container Runtime
+ tear down framework | framework.go:193
+STEP: Destroying namespace "container-runtime-194" for this suite. 08/24/23 12:39:19.303
+------------------------------
+• [4.190 seconds]
+[sig-node] Container Runtime
+test/e2e/common/node/framework.go:23
+ blackbox test
+ test/e2e/common/node/runtime.go:44
+ on terminated container
+ test/e2e/common/node/runtime.go:137
+ should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]
+ test/e2e/common/node/runtime.go:195
+
+ Begin Captured GinkgoWriter Output >>
+ [BeforeEach] [sig-node] Container Runtime
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:26:33.701
- Jul 29 16:26:33.702: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename sched-pred 07/29/23 16:26:33.704
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:26:33.736
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:26:33.741
- [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+ STEP: Creating a kubernetes client 08/24/23 12:39:15.127
+ Aug 24 12:39:15.128: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename container-runtime 08/24/23 12:39:15.131
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:15.162
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:15.168
+ [BeforeEach] [sig-node] Container Runtime
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
- test/e2e/scheduling/predicates.go:97
- Jul 29 16:26:33.745: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
- Jul 29 16:26:33.762: INFO: Waiting for terminating namespaces to be deleted...
- Jul 29 16:26:33.769: INFO:
- Logging pods the apiserver thinks is on node wetuj3nuajog-1 before test
- Jul 29 16:26:33.799: INFO: cilium-cdv47 from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.799: INFO: Container cilium-agent ready: true, restart count 0
- Jul 29 16:26:33.800: INFO: cilium-node-init-jdrzm from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.800: INFO: Container node-init ready: true, restart count 0
- Jul 29 16:26:33.800: INFO: coredns-787d4945fb-2xpvx from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.800: INFO: Container coredns ready: true, restart count 0
- Jul 29 16:26:33.800: INFO: coredns-787d4945fb-clg7z from kube-system started at 2023-07-29 15:24:10 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.800: INFO: Container coredns ready: true, restart count 0
- Jul 29 16:26:33.800: INFO: kube-addon-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.801: INFO: Container kube-addon-manager ready: true, restart count 0
- Jul 29 16:26:33.801: INFO: kube-apiserver-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.801: INFO: Container kube-apiserver ready: true, restart count 0
- Jul 29 16:26:33.801: INFO: kube-controller-manager-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.801: INFO: Container kube-controller-manager ready: true, restart count 0
- Jul 29 16:26:33.801: INFO: kube-proxy-zc9m8 from kube-system started at 2023-07-29 15:13:58 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.801: INFO: Container kube-proxy ready: true, restart count 0
- Jul 29 16:26:33.801: INFO: kube-scheduler-wetuj3nuajog-1 from kube-system started at 2023-07-29 15:25:15 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.801: INFO: Container kube-scheduler ready: true, restart count 0
- Jul 29 16:26:33.802: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-9dt2r from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
- Jul 29 16:26:33.802: INFO: Container sonobuoy-worker ready: true, restart count 0
- Jul 29 16:26:33.802: INFO: Container systemd-logs ready: true, restart count 0
- Jul 29 16:26:33.802: INFO:
- Logging pods the apiserver thinks is on node wetuj3nuajog-2 before test
- Jul 29 16:26:33.830: INFO: cilium-kxphw from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.830: INFO: Container cilium-agent ready: true, restart count 0
- Jul 29 16:26:33.830: INFO: cilium-node-init-fqx5t from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.830: INFO: Container node-init ready: true, restart count 0
- Jul 29 16:26:33.830: INFO: cilium-operator-8c499d9f6-hfgjd from kube-system started at 2023-07-29 15:23:01 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.830: INFO: Container cilium-operator ready: true, restart count 0
- Jul 29 16:26:33.830: INFO: kube-addon-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.830: INFO: Container kube-addon-manager ready: true, restart count 0
- Jul 29 16:26:33.830: INFO: kube-apiserver-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.830: INFO: Container kube-apiserver ready: true, restart count 0
- Jul 29 16:26:33.830: INFO: kube-controller-manager-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.830: INFO: Container kube-controller-manager ready: true, restart count 0
- Jul 29 16:26:33.830: INFO: kube-proxy-gzqkk from kube-system started at 2023-07-29 15:14:12 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.830: INFO: Container kube-proxy ready: true, restart count 0
- Jul 29 16:26:33.830: INFO: kube-scheduler-wetuj3nuajog-2 from kube-system started at 2023-07-29 15:25:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.830: INFO: Container kube-scheduler ready: true, restart count 0
- Jul 29 16:26:33.830: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-xnwv4 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
- Jul 29 16:26:33.830: INFO: Container sonobuoy-worker ready: true, restart count 0
- Jul 29 16:26:33.830: INFO: Container systemd-logs ready: true, restart count 0
- Jul 29 16:26:33.830: INFO:
- Logging pods the apiserver thinks is on node wetuj3nuajog-3 before test
- Jul 29 16:26:33.851: INFO: cilium-node-init-9ghzk from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.851: INFO: Container node-init ready: true, restart count 0
- Jul 29 16:26:33.851: INFO: cilium-v9c5p from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.851: INFO: Container cilium-agent ready: true, restart count 0
- Jul 29 16:26:33.851: INFO: kube-proxy-v77tx from kube-system started at 2023-07-29 15:26:36 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.851: INFO: Container kube-proxy ready: true, restart count 0
- Jul 29 16:26:33.851: INFO: sonobuoy from sonobuoy started at 2023-07-29 15:28:59 +0000 UTC (1 container statuses recorded)
- Jul 29 16:26:33.851: INFO: Container kube-sonobuoy ready: true, restart count 0
- Jul 29 16:26:33.852: INFO: sonobuoy-e2e-job-7bf00df102b6496e from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
- Jul 29 16:26:33.852: INFO: Container e2e ready: true, restart count 0
- Jul 29 16:26:33.852: INFO: Container sonobuoy-worker ready: true, restart count 0
- Jul 29 16:26:33.852: INFO: sonobuoy-systemd-logs-daemon-set-213a70ea07bf4097-k84f6 from sonobuoy started at 2023-07-29 15:29:10 +0000 UTC (2 container statuses recorded)
- Jul 29 16:26:33.852: INFO: Container sonobuoy-worker ready: true, restart count 0
- Jul 29 16:26:33.852: INFO: Container systemd-logs ready: true, restart count 0
- [It] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]
- test/e2e/scheduling/predicates.go:704
- STEP: Trying to launch a pod without a label to get a node which can launch it. 07/29/23 16:26:33.852
- Jul 29 16:26:33.871: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-pred-6768" to be "running"
- Jul 29 16:26:33.877: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 6.136891ms
- Jul 29 16:26:35.918: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.04783491s
- Jul 29 16:26:35.919: INFO: Pod "without-label" satisfied condition "running"
- STEP: Explicitly delete pod here to free the resource it takes. 07/29/23 16:26:35.93
- STEP: Trying to apply a random label on the found node. 07/29/23 16:26:35.97
- STEP: verifying the node has the label kubernetes.io/e2e-04e648e6-ec38-46c3-9518-31743e399ea9 95 07/29/23 16:26:35.989
- STEP: Trying to create a pod(pod4) with hostport 54322 and hostIP 0.0.0.0(empty string here) and expect scheduled 07/29/23 16:26:36.002
- Jul 29 16:26:36.013: INFO: Waiting up to 5m0s for pod "pod4" in namespace "sched-pred-6768" to be "not pending"
- Jul 29 16:26:36.019: INFO: Pod "pod4": Phase="Pending", Reason="", readiness=false. Elapsed: 5.896311ms
- Jul 29 16:26:38.028: INFO: Pod "pod4": Phase="Running", Reason="", readiness=true. Elapsed: 2.015241511s
- Jul 29 16:26:38.028: INFO: Pod "pod4" satisfied condition "not pending"
- STEP: Trying to create another pod(pod5) with hostport 54322 but hostIP 192.168.121.141 on the node which pod4 resides and expect not scheduled 07/29/23 16:26:38.029
- Jul 29 16:26:38.039: INFO: Waiting up to 5m0s for pod "pod5" in namespace "sched-pred-6768" to be "not pending"
- Jul 29 16:26:38.048: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 8.699222ms
- Jul 29 16:26:40.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019445414s
- Jul 29 16:26:42.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4.017921086s
- Jul 29 16:26:44.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.018059714s
- Jul 29 16:26:46.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 8.016811615s
- Jul 29 16:26:48.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 10.016006082s
- Jul 29 16:26:50.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 12.016955857s
- Jul 29 16:26:52.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 14.018361824s
- Jul 29 16:26:54.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 16.01786188s
- Jul 29 16:26:56.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 18.017402232s
- Jul 29 16:26:58.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 20.017195229s
- Jul 29 16:27:00.062: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 22.02291167s
- Jul 29 16:27:02.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 24.019068934s
- Jul 29 16:27:04.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 26.016399641s
- Jul 29 16:27:06.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 28.019988991s
- Jul 29 16:27:08.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 30.020595558s
- Jul 29 16:27:10.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 32.018770096s
- Jul 29 16:27:12.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 34.020221748s
- Jul 29 16:27:14.062: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 36.02254201s
- Jul 29 16:27:16.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 38.015215185s
- Jul 29 16:27:18.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 40.017414509s
- Jul 29 16:27:20.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 42.022392065s
- Jul 29 16:27:22.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 44.017036833s
- Jul 29 16:27:24.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 46.018638197s
- Jul 29 16:27:26.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 48.016987515s
- Jul 29 16:27:28.065: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 50.0260014s
- Jul 29 16:27:30.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 52.019162775s
- Jul 29 16:27:32.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 54.017616883s
- Jul 29 16:27:34.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 56.019203506s
- Jul 29 16:27:36.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 58.018324142s
- Jul 29 16:27:38.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.017714553s
- Jul 29 16:27:40.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.01754003s
- Jul 29 16:27:42.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.016740801s
- Jul 29 16:27:44.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.01652739s
- Jul 29 16:27:46.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.018162882s
- Jul 29 16:27:48.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.017826334s
- Jul 29 16:27:50.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.015897946s
- Jul 29 16:27:52.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.018339353s
- Jul 29 16:27:54.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.018262311s
- Jul 29 16:27:56.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.017697288s
- Jul 29 16:27:58.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.016190473s
- Jul 29 16:28:00.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.01683044s
- Jul 29 16:28:02.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.016913339s
- Jul 29 16:28:04.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.016538584s
- Jul 29 16:28:06.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.017577299s
- Jul 29 16:28:08.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.018357569s
- Jul 29 16:28:10.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.015475285s
- Jul 29 16:28:12.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.017893168s
- Jul 29 16:28:14.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.016970102s
- Jul 29 16:28:16.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.019184869s
- Jul 29 16:28:18.062: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.023155475s
- Jul 29 16:28:20.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.020780354s
- Jul 29 16:28:22.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.016204953s
- Jul 29 16:28:24.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.021987506s
- Jul 29 16:28:26.063: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.023845676s
- Jul 29 16:28:28.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.020585739s
- Jul 29 16:28:30.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m52.020345244s
- Jul 29 16:28:32.069: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.029597264s
- Jul 29 16:28:34.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.017793185s
- Jul 29 16:28:36.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.01865733s
- Jul 29 16:28:38.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.017876317s
- Jul 29 16:28:40.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m2.016831507s
- Jul 29 16:28:42.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m4.018916195s
- Jul 29 16:28:44.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m6.019668907s
- Jul 29 16:28:46.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m8.016880628s
- Jul 29 16:28:48.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m10.016701545s
- Jul 29 16:28:50.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m12.020222677s
- Jul 29 16:28:52.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m14.019934405s
- Jul 29 16:28:54.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m16.018250102s
- Jul 29 16:28:56.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m18.019005116s
- Jul 29 16:28:58.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m20.015938105s
- Jul 29 16:29:00.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m22.020603473s
- Jul 29 16:29:02.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m24.016492031s
- Jul 29 16:29:04.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m26.01879245s
- Jul 29 16:29:06.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m28.020494636s
- Jul 29 16:29:08.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m30.017897496s
- Jul 29 16:29:10.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m32.020656378s
- Jul 29 16:29:12.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m34.020381707s
- Jul 29 16:29:14.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m36.017060459s
- Jul 29 16:29:16.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m38.017544965s
- Jul 29 16:29:18.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m40.018441318s
- Jul 29 16:29:20.064: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m42.02474963s
- Jul 29 16:29:22.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m44.018968355s
- Jul 29 16:29:24.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m46.020305707s
- Jul 29 16:29:26.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m48.019014234s
- Jul 29 16:29:28.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m50.018629377s
- Jul 29 16:29:30.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m52.018698644s
- Jul 29 16:29:32.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m54.020978666s
- Jul 29 16:29:34.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m56.018463931s
- Jul 29 16:29:36.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 2m58.019909292s
- Jul 29 16:29:38.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m0.016720963s
- Jul 29 16:29:40.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m2.01599087s
- Jul 29 16:29:42.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m4.01992827s
- Jul 29 16:29:44.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m6.016267313s
- Jul 29 16:29:46.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m8.018041522s
- Jul 29 16:29:48.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m10.017798851s
- Jul 29 16:29:50.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m12.018780142s
- Jul 29 16:29:52.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m14.018868666s
- Jul 29 16:29:54.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m16.019065506s
- Jul 29 16:29:56.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m18.015455724s
- Jul 29 16:29:58.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m20.018806624s
- Jul 29 16:30:00.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m22.01598351s
- Jul 29 16:30:02.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m24.018921351s
- Jul 29 16:30:04.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m26.016816972s
- Jul 29 16:30:06.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m28.017704419s
- Jul 29 16:30:08.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m30.016367366s
- Jul 29 16:30:10.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m32.020556639s
- Jul 29 16:30:12.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m34.02061631s
- Jul 29 16:30:14.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m36.019025981s
- Jul 29 16:30:16.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m38.016427108s
- Jul 29 16:30:18.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m40.019837326s
- Jul 29 16:30:20.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m42.021544774s
- Jul 29 16:30:22.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m44.022248328s
- Jul 29 16:30:24.064: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m46.025094839s
- Jul 29 16:30:26.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m48.018479605s
- Jul 29 16:30:28.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m50.020044379s
- Jul 29 16:30:30.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m52.019405446s
- Jul 29 16:30:32.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m54.018565725s
- Jul 29 16:30:34.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m56.019732227s
- Jul 29 16:30:36.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 3m58.019028695s
- Jul 29 16:30:38.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m0.017015851s
- Jul 29 16:30:40.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m2.014747442s
- Jul 29 16:30:42.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m4.019671476s
- Jul 29 16:30:44.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m6.021043148s
- Jul 29 16:30:46.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m8.017299242s
- Jul 29 16:30:48.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m10.017353817s
- Jul 29 16:30:50.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m12.01859539s
- Jul 29 16:30:52.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m14.018767959s
- Jul 29 16:30:54.059: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m16.019877366s
- Jul 29 16:30:56.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m18.016244457s
- Jul 29 16:30:58.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m20.01707616s
- Jul 29 16:31:00.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m22.01747715s
- Jul 29 16:31:02.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m24.016297599s
- Jul 29 16:31:04.055: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m26.01577876s
- Jul 29 16:31:06.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m28.018764734s
- Jul 29 16:31:08.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m30.018251442s
- Jul 29 16:31:10.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m32.01702909s
- Jul 29 16:31:12.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m34.017682018s
- Jul 29 16:31:14.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m36.019126564s
- Jul 29 16:31:16.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m38.018689949s
- Jul 29 16:31:18.054: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m40.015392494s
- Jul 29 16:31:20.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m42.017313596s
- Jul 29 16:31:22.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m44.019252055s
- Jul 29 16:31:24.056: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m46.017150812s
- Jul 29 16:31:26.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m48.01863926s
- Jul 29 16:31:28.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m50.018872034s
- Jul 29 16:31:30.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m52.01932638s
- Jul 29 16:31:32.060: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m54.021485729s
- Jul 29 16:31:34.057: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m56.018486925s
- Jul 29 16:31:36.058: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 4m58.018961863s
- Jul 29 16:31:38.061: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.022082154s
- Jul 29 16:31:38.067: INFO: Pod "pod5": Phase="Pending", Reason="", readiness=false. Elapsed: 5m0.027916483s
- STEP: removing the label kubernetes.io/e2e-04e648e6-ec38-46c3-9518-31743e399ea9 off the node wetuj3nuajog-3 07/29/23 16:31:38.067
- STEP: verifying the node doesn't have the label kubernetes.io/e2e-04e648e6-ec38-46c3-9518-31743e399ea9 07/29/23 16:31:38.109
- [AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+ [It] should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]
+ test/e2e/common/node/runtime.go:195
+ STEP: create the container 08/24/23 12:39:15.176
+ STEP: wait for the container to reach Succeeded 08/24/23 12:39:15.202
+ STEP: get the container status 08/24/23 12:39:19.255
+ STEP: the container should be terminated 08/24/23 12:39:19.261
+ STEP: the termination message should be set 08/24/23 12:39:19.261
+ Aug 24 12:39:19.262: INFO: Expected: &{DONE} to match Container's Termination Message: DONE --
+ STEP: delete the container 08/24/23 12:39:19.262
+ [AfterEach] [sig-node] Container Runtime
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:31:38.118: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
- test/e2e/scheduling/predicates.go:88
- [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+ Aug 24 12:39:19.295: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-node] Container Runtime
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+ [DeferCleanup (Each)] [sig-node] Container Runtime
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+ [DeferCleanup (Each)] [sig-node] Container Runtime
 tear down framework | framework.go:193
- STEP: Destroying namespace "sched-pred-6768" for this suite. 07/29/23 16:31:38.129
+ STEP: Destroying namespace "container-runtime-194" for this suite. 08/24/23 12:39:19.303
 << End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+SSSSSSSSSSSS
------------------------------
-[sig-instrumentation] Events
- should delete a collection of events [Conformance]
- test/e2e/instrumentation/core_events.go:175
-[BeforeEach] [sig-instrumentation] Events
+[sig-node] Probing container
+ with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+ test/e2e/common/node/container_probe.go:72
+[BeforeEach] [sig-node] Probing container
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:31:38.162
-Jul 29 16:31:38.162: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename events 07/29/23 16:31:38.17
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:31:38.211
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:31:38.219
-[BeforeEach] [sig-instrumentation] Events
+STEP: Creating a kubernetes client 08/24/23 12:39:19.318
+Aug 24 12:39:19.319: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename container-probe 08/24/23 12:39:19.32
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:19.347
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:19.354
+[BeforeEach] [sig-node] Probing container
 test/e2e/framework/metrics/init/init.go:31
-[It] should delete a collection of events [Conformance]
- test/e2e/instrumentation/core_events.go:175
-STEP: Create set of events 07/29/23 16:31:38.225
-Jul 29 16:31:38.240: INFO: created test-event-1
-Jul 29 16:31:38.253: INFO: created test-event-2
-Jul 29 16:31:38.263: INFO: created test-event-3
-STEP: get a list of Events with a label in the current namespace 07/29/23 16:31:38.264
-STEP: delete collection of events 07/29/23 16:31:38.271
-Jul 29 16:31:38.271: INFO: requesting DeleteCollection of events
-STEP: check that the list of events matches the requested quantity 07/29/23 16:31:38.322
-Jul 29 16:31:38.323: INFO: requesting list of events to confirm quantity
-[AfterEach] [sig-instrumentation] Events
+[BeforeEach] [sig-node] Probing container
+ test/e2e/common/node/container_probe.go:63
+[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+ test/e2e/common/node/container_probe.go:72
+Aug 24 12:39:19.378: INFO: Waiting up to 5m0s for pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a" in namespace "container-probe-5187" to be "running and ready"
+Aug 24 12:39:19.385: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.438705ms
+Aug 24 12:39:19.385: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Pending, waiting for it to be Running (with Ready = true)
+Aug 24 12:39:21.396: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 2.017909529s
+Aug 24 12:39:21.396: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:23.422: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 4.043565869s
+Aug 24 12:39:23.422: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:25.396: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 6.017722387s
+Aug 24 12:39:25.396: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:27.429: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 8.05116671s
+Aug 24 12:39:27.429: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:29.394: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 10.015561442s
+Aug 24 12:39:29.394: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:31.398: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 12.02025761s
+Aug 24 12:39:31.399: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:33.392: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 14.013929884s
+Aug 24 12:39:33.392: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:35.435: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 16.056316339s
+Aug 24 12:39:35.435: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:37.393: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 18.014555018s
+Aug 24 12:39:37.393: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:39.395: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 20.016496024s
+Aug 24 12:39:39.395: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+Aug 24 12:39:41.394: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=true. Elapsed: 22.015399839s
+Aug 24 12:39:41.394: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = true)
+Aug 24 12:39:41.394: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a" satisfied condition "running and ready"
+Aug 24 12:39:41.399: INFO: Container started at 2023-08-24 12:39:20 +0000 UTC, pod became ready at 2023-08-24 12:39:39 +0000 UTC
+[AfterEach] [sig-node] Probing container
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:31:38.331: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-instrumentation] Events
+Aug 24 12:39:41.399: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-node] Probing container
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-instrumentation] Events
+[DeferCleanup (Each)] [sig-node] Probing container
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-instrumentation] Events
+[DeferCleanup (Each)] [sig-node] Probing container
 tear down framework | framework.go:193
-STEP: Destroying namespace "events-5318" for this suite. 07/29/23 16:31:38.343
+STEP: Destroying namespace "container-probe-5187" for this suite. 08/24/23 12:39:41.409
------------------------------
-• [0.196 seconds]
-[sig-instrumentation] Events
-test/e2e/instrumentation/common/framework.go:23
- should delete a collection of events [Conformance]
- test/e2e/instrumentation/core_events.go:175
+• [SLOW TEST] [22.104 seconds]
+[sig-node] Probing container
+test/e2e/common/node/framework.go:23
+ with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+ test/e2e/common/node/container_probe.go:72
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-instrumentation] Events
+ [BeforeEach] [sig-node] Probing container
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:31:38.162
- Jul 29 16:31:38.162: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename events 07/29/23 16:31:38.17
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:31:38.211
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:31:38.219
- [BeforeEach] [sig-instrumentation] Events
+ STEP: Creating a kubernetes client 08/24/23 12:39:19.318
+ Aug 24 12:39:19.319: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename container-probe 08/24/23 12:39:19.32
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:19.347
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:19.354
+ [BeforeEach] [sig-node] Probing container
 test/e2e/framework/metrics/init/init.go:31
- [It] should delete a collection of events [Conformance]
- test/e2e/instrumentation/core_events.go:175
- STEP: Create set of events 07/29/23 16:31:38.225
- Jul 29 16:31:38.240: INFO: created test-event-1
- Jul 29 16:31:38.253: INFO: created test-event-2
- Jul 29 16:31:38.263: INFO: created test-event-3
- STEP: get a list of Events with a label in the current namespace 07/29/23 16:31:38.264
- STEP: delete collection of events 07/29/23 16:31:38.271
- Jul 29 16:31:38.271: INFO: requesting DeleteCollection of events
- STEP: check that the list of events matches the requested quantity 07/29/23 16:31:38.322
- Jul 29 16:31:38.323: INFO: requesting list of events to confirm quantity
- [AfterEach] [sig-instrumentation] Events
+ [BeforeEach] [sig-node] Probing container
+ test/e2e/common/node/container_probe.go:63
+ [It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+ test/e2e/common/node/container_probe.go:72
+ Aug 24 12:39:19.378: INFO: Waiting up to 5m0s for pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a" in namespace "container-probe-5187" to be "running and ready"
+ Aug 24 12:39:19.385: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Pending", Reason="", readiness=false. Elapsed: 6.438705ms
+ Aug 24 12:39:19.385: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Pending, waiting for it to be Running (with Ready = true)
+ Aug 24 12:39:21.396: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 2.017909529s
+ Aug 24 12:39:21.396: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:23.422: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 4.043565869s
+ Aug 24 12:39:23.422: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:25.396: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 6.017722387s
+ Aug 24 12:39:25.396: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:27.429: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 8.05116671s
+ Aug 24 12:39:27.429: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:29.394: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 10.015561442s
+ Aug 24 12:39:29.394: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:31.398: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 12.02025761s
+ Aug 24 12:39:31.399: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:33.392: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 14.013929884s
+ Aug 24 12:39:33.392: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:35.435: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 16.056316339s
+ Aug 24 12:39:35.435: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:37.393: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 18.014555018s
+ Aug 24 12:39:37.393: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:39.395: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=false. Elapsed: 20.016496024s
+ Aug 24 12:39:39.395: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = false)
+ Aug 24 12:39:41.394: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a": Phase="Running", Reason="", readiness=true. Elapsed: 22.015399839s
+ Aug 24 12:39:41.394: INFO: The phase of Pod test-webserver-db51fe43-5c11-489b-b477-416259bcc39a is Running (Ready = true)
+ Aug 24 12:39:41.394: INFO: Pod "test-webserver-db51fe43-5c11-489b-b477-416259bcc39a" satisfied condition "running and ready"
+ Aug 24 12:39:41.399: INFO: Container started at 2023-08-24 12:39:20 +0000 UTC, pod became ready at 2023-08-24 12:39:39 +0000 UTC
+ [AfterEach] [sig-node] Probing container
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:31:38.331: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-instrumentation] Events
+ Aug 24 12:39:41.399: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-node] Probing container
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-instrumentation] Events
+ [DeferCleanup (Each)] [sig-node] Probing container
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-instrumentation] Events
+ [DeferCleanup (Each)] [sig-node] Probing container
 tear down framework | framework.go:193
- STEP: Destroying namespace "events-5318" for this suite. 07/29/23 16:31:38.343
+ STEP: Destroying namespace "container-probe-5187" for this suite. 08/24/23 12:39:41.409
 << End Captured GinkgoWriter Output
------------------------------
-SSSSSSS
+SSSSSSSSSSS
------------------------------
-[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- patching/updating a mutating webhook should work [Conformance]
- test/e2e/apimachinery/webhook.go:508
-[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+[sig-storage] EmptyDir volumes
+ should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/empty_dir.go:217
+[BeforeEach] [sig-storage] EmptyDir volumes
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:31:38.359
-Jul 29 16:31:38.359: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename webhook 07/29/23 16:31:38.363
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:31:38.394
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:31:38.402
-[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+STEP: Creating a kubernetes client 08/24/23 12:39:41.425
+Aug 24 12:39:41.425: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename emptydir 08/24/23 12:39:41.427
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:41.461
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:41.467
+[BeforeEach] [sig-storage] EmptyDir volumes
 test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- test/e2e/apimachinery/webhook.go:90
-STEP: Setting up server cert 07/29/23 16:31:38.439
-STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:31:38.882
-STEP: Deploying the webhook pod 07/29/23 16:31:38.903
-STEP: Wait for the deployment to be ready 07/29/23 16:31:38.942
-Jul 29 16:31:38.961: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set
-STEP: Deploying the webhook service 07/29/23 16:31:40.987
-STEP: Verifying the service has paired with the endpoint 07/29/23 16:31:41.013
-Jul 29 16:31:42.014: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1
-[It] patching/updating a mutating webhook should work [Conformance]
- test/e2e/apimachinery/webhook.go:508
-STEP: Creating a mutating webhook configuration 07/29/23 16:31:42.024
-STEP: Updating a mutating webhook configuration's rules to not include the create operation 07/29/23 16:31:42.071
-STEP: Creating a configMap that should not be mutated 07/29/23 16:31:42.084
-STEP: Patching a mutating webhook configuration's rules to include the create operation 07/29/23 16:31:42.103
-STEP: Creating a configMap that should be mutated 07/29/23 16:31:42.115
-[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+[It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/empty_dir.go:217
+STEP: Creating a pod to test emptydir 0777 on node default medium 08/24/23 12:39:41.472
+Aug 24 12:39:41.488: INFO: Waiting up to 5m0s for pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9" in namespace "emptydir-4340" to be "Succeeded or Failed"
+Aug 24 12:39:41.494: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9": Phase="Pending", Reason="", readiness=false. Elapsed: 5.837753ms
+Aug 24 12:39:43.502: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9": Phase="Running", Reason="", readiness=true. Elapsed: 2.013960148s
+Aug 24 12:39:45.501: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9": Phase="Running", Reason="", readiness=false. Elapsed: 4.013135634s
+Aug 24 12:39:47.505: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.016923904s
+STEP: Saw pod success 08/24/23 12:39:47.505
+Aug 24 12:39:47.507: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9" satisfied condition "Succeeded or Failed"
+Aug 24 12:39:47.515: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-7101fd93-b351-4589-be1b-a153ea7297e9 container test-container:
+STEP: delete the pod 08/24/23 12:39:47.572
+Aug 24 12:39:47.610: INFO: Waiting for pod pod-7101fd93-b351-4589-be1b-a153ea7297e9 to disappear
+Aug 24 12:39:47.619: INFO: Pod pod-7101fd93-b351-4589-be1b-a153ea7297e9 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:31:42.156: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- test/e2e/apimachinery/webhook.go:105
-[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+Aug 24 12:39:47.619: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-storage] EmptyDir volumes
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+[DeferCleanup (Each)] [sig-storage] EmptyDir volumes
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+[DeferCleanup (Each)] [sig-storage] EmptyDir volumes
 tear down framework | framework.go:193
-STEP: Destroying namespace "webhook-9011" for this suite. 07/29/23 16:31:42.246
-STEP: Destroying namespace "webhook-9011-markers" for this suite. 07/29/23 16:31:42.258
+STEP: Destroying namespace "emptydir-4340" for this suite. 08/24/23 12:39:47.644
------------------------------
-• [3.912 seconds]
-[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
-test/e2e/apimachinery/framework.go:23
- patching/updating a mutating webhook should work [Conformance]
- test/e2e/apimachinery/webhook.go:508
+• [SLOW TEST] [6.233 seconds]
+[sig-storage] EmptyDir volumes
+test/e2e/common/storage/framework.go:23
+ should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/empty_dir.go:217
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+ [BeforeEach] [sig-storage] EmptyDir volumes
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:31:38.359
- Jul 29 16:31:38.359: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename webhook 07/29/23 16:31:38.363
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:31:38.394
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:31:38.402
- [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+ STEP: Creating a kubernetes client 08/24/23 12:39:41.425
+ Aug 24 12:39:41.425: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename emptydir 08/24/23 12:39:41.427
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:41.461
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:41.467
+ [BeforeEach] [sig-storage] EmptyDir volumes
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- test/e2e/apimachinery/webhook.go:90
- STEP: Setting up server cert 07/29/23 16:31:38.439
- STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:31:38.882
- STEP: Deploying the webhook pod 07/29/23 16:31:38.903
- STEP: Wait for the deployment to be ready 07/29/23 16:31:38.942
- Jul 29 16:31:38.961: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set
- STEP: Deploying the webhook service 07/29/23 16:31:40.987
- STEP: Verifying the service has paired with the endpoint 07/29/23 16:31:41.013
- Jul 29 16:31:42.014: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1
- [It] patching/updating a mutating webhook should work [Conformance]
- test/e2e/apimachinery/webhook.go:508
- STEP: Creating a mutating webhook configuration 07/29/23 16:31:42.024
- STEP: Updating a mutating webhook configuration's rules to not include the create operation 07/29/23 16:31:42.071
- STEP: Creating a configMap that should not be mutated 07/29/23 16:31:42.084
- STEP: Patching a mutating webhook configuration's rules to include the create operation 07/29/23 16:31:42.103
- STEP: Creating a configMap that should be mutated 07/29/23 16:31:42.115
- [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+ [It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+ test/e2e/common/storage/empty_dir.go:217
+ STEP: Creating a pod to test emptydir 0777 on node default medium 08/24/23 12:39:41.472
+ Aug 24 12:39:41.488: INFO: Waiting up to 5m0s for pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9" in namespace "emptydir-4340" to be "Succeeded or Failed"
+ Aug 24 12:39:41.494: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9": Phase="Pending", Reason="", readiness=false. Elapsed: 5.837753ms
+ Aug 24 12:39:43.502: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9": Phase="Running", Reason="", readiness=true. Elapsed: 2.013960148s
+ Aug 24 12:39:45.501: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9": Phase="Running", Reason="", readiness=false. Elapsed: 4.013135634s
+ Aug 24 12:39:47.505: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.016923904s
+ STEP: Saw pod success 08/24/23 12:39:47.505
+ Aug 24 12:39:47.507: INFO: Pod "pod-7101fd93-b351-4589-be1b-a153ea7297e9" satisfied condition "Succeeded or Failed"
+ Aug 24 12:39:47.515: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-7101fd93-b351-4589-be1b-a153ea7297e9 container test-container:
+ STEP: delete the pod 08/24/23 12:39:47.572
+ Aug 24 12:39:47.610: INFO: Waiting for pod pod-7101fd93-b351-4589-be1b-a153ea7297e9 to disappear
+ Aug 24 12:39:47.619: INFO: Pod pod-7101fd93-b351-4589-be1b-a153ea7297e9 no longer exists
+ [AfterEach] [sig-storage] EmptyDir volumes
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:31:42.156: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
- test/e2e/apimachinery/webhook.go:105
- [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+ Aug 24 12:39:47.619: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] EmptyDir volumes
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+ [DeferCleanup (Each)] [sig-storage] EmptyDir volumes
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
+ [DeferCleanup (Each)] [sig-storage] EmptyDir volumes
 tear down framework | framework.go:193
- STEP: Destroying namespace "webhook-9011" for this suite. 07/29/23 16:31:42.246
- STEP: Destroying namespace "webhook-9011-markers" for this suite. 07/29/23 16:31:42.258
+ STEP: Destroying namespace "emptydir-4340" for this suite. 08/24/23 12:39:47.644
 << End Captured GinkgoWriter Output
------------------------------
-SSSSSSS
+SSSS
------------------------------
-[sig-scheduling] LimitRange
- should list, patch and delete a LimitRange by collection [Conformance]
- test/e2e/scheduling/limit_range.go:239
-[BeforeEach] [sig-scheduling] LimitRange
+[sig-node] ConfigMap
+ should be consumable via the environment [NodeConformance] [Conformance]
+ test/e2e/common/node/configmap.go:93
+[BeforeEach] [sig-node] ConfigMap
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:31:42.274
-Jul 29 16:31:42.274: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename limitrange 07/29/23 16:31:42.277
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:31:42.326
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:31:42.33
-[BeforeEach] [sig-scheduling] LimitRange
+STEP: Creating a kubernetes client 08/24/23 12:39:47.662
+Aug 24 12:39:47.662: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename configmap 08/24/23 12:39:47.664
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:47.699
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:47.714
+[BeforeEach] [sig-node] ConfigMap
 test/e2e/framework/metrics/init/init.go:31
-[It] should list, patch and delete a LimitRange by collection [Conformance]
- test/e2e/scheduling/limit_range.go:239
-STEP: Creating LimitRange "e2e-limitrange-kt5pz" in namespace "limitrange-2599" 07/29/23 16:31:42.335
-STEP: Creating another limitRange in another namespace 07/29/23 16:31:42.343
-Jul 29 16:31:42.377: INFO: Namespace "e2e-limitrange-kt5pz-5150" created
-Jul 29 16:31:42.377: INFO: Creating LimitRange "e2e-limitrange-kt5pz" in namespace "e2e-limitrange-kt5pz-5150"
-STEP: Listing all LimitRanges with label "e2e-test=e2e-limitrange-kt5pz" 07/29/23 16:31:42.385
-Jul 29 16:31:42.391: INFO: Found 2 limitRanges
-STEP: Patching LimitRange "e2e-limitrange-kt5pz" in "limitrange-2599" namespace 07/29/23 16:31:42.391
-Jul 29 16:31:42.401: INFO: LimitRange "e2e-limitrange-kt5pz" has been patched
-STEP: Delete LimitRange "e2e-limitrange-kt5pz" by Collection with labelSelector: "e2e-limitrange-kt5pz=patched" 07/29/23 16:31:42.401
-STEP: Confirm that the limitRange "e2e-limitrange-kt5pz" has been deleted 07/29/23 16:31:42.414
-Jul 29 16:31:42.414: INFO: Requesting list of LimitRange to confirm quantity
-Jul 29 16:31:42.420: INFO: Found 0 LimitRange with label "e2e-limitrange-kt5pz=patched"
-Jul 29 16:31:42.420: INFO: LimitRange "e2e-limitrange-kt5pz" has been deleted.
-STEP: Confirm that a single LimitRange still exists with label "e2e-test=e2e-limitrange-kt5pz" 07/29/23 16:31:42.42
-Jul 29 16:31:42.426: INFO: Found 1 limitRange
-[AfterEach] [sig-scheduling] LimitRange
+[It] should be consumable via the environment [NodeConformance] [Conformance]
+ test/e2e/common/node/configmap.go:93
+STEP: Creating configMap configmap-166/configmap-test-998f3ce1-1ddc-40bb-a08b-b0d7535540ea 08/24/23 12:39:47.72
+STEP: Creating a pod to test consume configMaps 08/24/23 12:39:47.731
+Aug 24 12:39:47.760: INFO: Waiting up to 5m0s for pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce" in namespace "configmap-166" to be "Succeeded or Failed"
+Aug 24 12:39:47.768: INFO: Pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce": Phase="Pending", Reason="", readiness=false. Elapsed: 8.501361ms
+Aug 24 12:39:49.779: INFO: Pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019036009s
+Aug 24 12:39:51.775: INFO: Pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015060946s
+STEP: Saw pod success 08/24/23 12:39:51.775
+Aug 24 12:39:51.776: INFO: Pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce" satisfied condition "Succeeded or Failed"
+Aug 24 12:39:51.781: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce container env-test:
+STEP: delete the pod 08/24/23 12:39:51.795
+Aug 24 12:39:51.823: INFO: Waiting for pod pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce to disappear
+Aug 24 12:39:51.829: INFO: Pod pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce no longer exists
+[AfterEach] [sig-node] ConfigMap
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:31:42.426: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-scheduling] LimitRange
+Aug 24 12:39:51.830: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-node] ConfigMap
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-scheduling] LimitRange
+[DeferCleanup (Each)] [sig-node] ConfigMap
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-scheduling] LimitRange
+[DeferCleanup (Each)] [sig-node] ConfigMap
 tear down framework | framework.go:193
-STEP: Destroying namespace "limitrange-2599" for this suite. 07/29/23 16:31:42.435
-STEP: Destroying namespace "e2e-limitrange-kt5pz-5150" for this suite. 07/29/23 16:31:42.445
+STEP: Destroying namespace "configmap-166" for this suite. 08/24/23 12:39:51.839
------------------------------
-• [0.184 seconds]
-[sig-scheduling] LimitRange
-test/e2e/scheduling/framework.go:40
- should list, patch and delete a LimitRange by collection [Conformance]
- test/e2e/scheduling/limit_range.go:239
+• [4.189 seconds]
+[sig-node] ConfigMap
+test/e2e/common/node/framework.go:23
+ should be consumable via the environment [NodeConformance] [Conformance]
+ test/e2e/common/node/configmap.go:93
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-scheduling] LimitRange
+ [BeforeEach] [sig-node] ConfigMap
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:31:42.274
- Jul 29 16:31:42.274: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename limitrange 07/29/23 16:31:42.277
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:31:42.326
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:31:42.33
- [BeforeEach] [sig-scheduling] LimitRange
+ STEP: Creating a kubernetes client 08/24/23 12:39:47.662
+ Aug 24 12:39:47.662: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename configmap 08/24/23 12:39:47.664
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:47.699
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:47.714
+ [BeforeEach] [sig-node] ConfigMap
 test/e2e/framework/metrics/init/init.go:31
- [It] should list, patch and delete a LimitRange by collection [Conformance]
- test/e2e/scheduling/limit_range.go:239
- STEP: Creating LimitRange "e2e-limitrange-kt5pz" in namespace "limitrange-2599" 07/29/23 16:31:42.335
- STEP: Creating another limitRange in another namespace 07/29/23 16:31:42.343
- Jul 29 16:31:42.377: INFO: Namespace "e2e-limitrange-kt5pz-5150" created
- Jul 29 16:31:42.377: INFO: Creating LimitRange "e2e-limitrange-kt5pz" in namespace "e2e-limitrange-kt5pz-5150"
- STEP: Listing all LimitRanges with label "e2e-test=e2e-limitrange-kt5pz" 07/29/23 16:31:42.385
- Jul 29 16:31:42.391: INFO: Found 2 limitRanges
- STEP: Patching LimitRange "e2e-limitrange-kt5pz" in "limitrange-2599" namespace 07/29/23 16:31:42.391
- Jul 29 16:31:42.401: INFO: LimitRange "e2e-limitrange-kt5pz" has been patched
- STEP: Delete LimitRange "e2e-limitrange-kt5pz" by Collection with labelSelector: "e2e-limitrange-kt5pz=patched" 07/29/23 16:31:42.401
- STEP: Confirm that the limitRange "e2e-limitrange-kt5pz" has been deleted 07/29/23 16:31:42.414
- Jul 29 16:31:42.414: INFO: Requesting list of LimitRange to confirm quantity
- Jul 29 16:31:42.420: INFO: Found 0 LimitRange with label "e2e-limitrange-kt5pz=patched"
- Jul 29 16:31:42.420: INFO: LimitRange "e2e-limitrange-kt5pz" has been deleted.
- STEP: Confirm that a single LimitRange still exists with label "e2e-test=e2e-limitrange-kt5pz" 07/29/23 16:31:42.42
- Jul 29 16:31:42.426: INFO: Found 1 limitRange
- [AfterEach] [sig-scheduling] LimitRange
+ [It] should be consumable via the environment [NodeConformance] [Conformance]
+ test/e2e/common/node/configmap.go:93
+ STEP: Creating configMap configmap-166/configmap-test-998f3ce1-1ddc-40bb-a08b-b0d7535540ea 08/24/23 12:39:47.72
+ STEP: Creating a pod to test consume configMaps 08/24/23 12:39:47.731
+ Aug 24 12:39:47.760: INFO: Waiting up to 5m0s for pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce" in namespace "configmap-166" to be "Succeeded or Failed"
+ Aug 24 12:39:47.768: INFO: Pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce": Phase="Pending", Reason="", readiness=false. Elapsed: 8.501361ms
+ Aug 24 12:39:49.779: INFO: Pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019036009s
+ Aug 24 12:39:51.775: INFO: Pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015060946s
+ STEP: Saw pod success 08/24/23 12:39:51.775
+ Aug 24 12:39:51.776: INFO: Pod "pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce" satisfied condition "Succeeded or Failed"
+ Aug 24 12:39:51.781: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce container env-test:
+ STEP: delete the pod 08/24/23 12:39:51.795
+ Aug 24 12:39:51.823: INFO: Waiting for pod pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce to disappear
+ Aug 24 12:39:51.829: INFO: Pod pod-configmaps-9073c0b1-ee61-437f-9fc2-ab72cd1f05ce no longer exists
+ [AfterEach] [sig-node] ConfigMap
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:31:42.426: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-scheduling] LimitRange
+ Aug 24 12:39:51.830: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-node] ConfigMap
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-scheduling] LimitRange
+ [DeferCleanup (Each)] [sig-node] ConfigMap
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-scheduling] LimitRange
+ [DeferCleanup (Each)] [sig-node] ConfigMap
 tear down framework | framework.go:193
- STEP: Destroying namespace "limitrange-2599" for this suite. 07/29/23 16:31:42.435
- STEP: Destroying namespace "e2e-limitrange-kt5pz-5150" for this suite. 07/29/23 16:31:42.445
+ STEP: Destroying namespace "configmap-166" for this suite. 08/24/23 12:39:51.839
 << End Captured GinkgoWriter Output
------------------------------
-[sig-node] Probing container
- with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
- test/e2e/common/node/container_probe.go:72
-[BeforeEach] [sig-node] Probing container
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance]
+ test/e2e/common/node/sysctl.go:123
+[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ test/e2e/common/node/sysctl.go:37
+[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:31:42.458
-Jul 29 16:31:42.458: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename container-probe 07/29/23 16:31:42.461
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:31:42.489
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:31:42.492
-[BeforeEach] [sig-node] Probing container
+STEP: Creating a kubernetes client 08/24/23 12:39:51.859
+Aug 24 12:39:51.859: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename sysctl 08/24/23 12:39:51.862
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:51.893
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:51.897
+[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-node] Probing container
- test/e2e/common/node/container_probe.go:63
-[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
- test/e2e/common/node/container_probe.go:72
-Jul 29 16:31:42.510: INFO: Waiting up to 5m0s for pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a" in namespace "container-probe-7112" to be "running and ready"
-Jul 29 16:31:42.515: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Pending", Reason="", readiness=false. Elapsed: 5.008112ms
-Jul 29 16:31:42.515: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Pending, waiting for it to be Running (with Ready = true)
-Jul 29 16:31:44.523: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 2.013737857s
-Jul 29 16:31:44.523: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:31:46.523: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 4.013218105s
-Jul 29 16:31:46.523: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:31:48.523: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 6.013258071s
-Jul 29 16:31:48.523: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:31:50.525: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 8.01572412s
-Jul 29 16:31:50.525: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:31:52.521: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 10.011849964s
-Jul 29 16:31:52.522: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:31:54.524: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 12.013874206s
-Jul 29 16:31:54.524: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:31:56.525: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 14.015061672s
-Jul 29 16:31:56.525: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:31:58.525: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 16.015347473s
-Jul 29 16:31:58.525: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:32:00.527: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 18.016978177s
-Jul 29 16:32:00.527: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:32:02.529: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 20.018997327s
-Jul 29 16:32:02.529: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
-Jul 29 16:32:04.526: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=true. Elapsed: 22.016534682s
-Jul 29 16:32:04.527: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = true)
-Jul 29 16:32:04.527: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a" satisfied condition "running and ready"
-Jul 29 16:32:04.531: INFO: Container started at 2023-07-29 16:31:43 +0000 UTC, pod became ready at 2023-07-29 16:32:02 +0000 UTC
-[AfterEach] [sig-node] Probing container
+[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ test/e2e/common/node/sysctl.go:67
+[It] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance]
+ test/e2e/common/node/sysctl.go:123
+STEP: Creating a pod with one valid and two invalid sysctls 08/24/23 12:39:51.905
+[AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:32:04.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-node] Probing container
+Aug 24 12:39:51.914: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-node] Probing container
+[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-node] Probing container
+[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 tear down framework | framework.go:193
-STEP: Destroying namespace "container-probe-7112" for this suite. 07/29/23 16:32:04.541
+STEP: Destroying namespace "sysctl-7738" for this suite. 08/24/23 12:39:51.923
------------------------------
-• [SLOW TEST] [22.094 seconds]
-[sig-node] Probing container
+• [0.079 seconds]
+[sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/common/node/framework.go:23
- with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
- test/e2e/common/node/container_probe.go:72
+ should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance]
+ test/e2e/common/node/sysctl.go:123
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-node] Probing container
+ [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ test/e2e/common/node/sysctl.go:37
+ [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:31:42.458
- Jul 29 16:31:42.458: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename container-probe 07/29/23 16:31:42.461
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:31:42.489
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:31:42.492
- [BeforeEach] [sig-node] Probing container
+ STEP: Creating a kubernetes client 08/24/23 12:39:51.859
+ Aug 24 12:39:51.859: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename sysctl 08/24/23 12:39:51.862
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:51.893
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:51.897
+ [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-node] Probing container
- test/e2e/common/node/container_probe.go:63
- [It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
- test/e2e/common/node/container_probe.go:72
- Jul 29 16:31:42.510: INFO: Waiting up to 5m0s for pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a" in namespace "container-probe-7112" to be "running and ready"
- Jul 29 16:31:42.515: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Pending", Reason="", readiness=false. Elapsed: 5.008112ms
- Jul 29 16:31:42.515: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Pending, waiting for it to be Running (with Ready = true)
- Jul 29 16:31:44.523: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 2.013737857s
- Jul 29 16:31:44.523: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:31:46.523: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 4.013218105s
- Jul 29 16:31:46.523: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:31:48.523: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 6.013258071s
- Jul 29 16:31:48.523: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:31:50.525: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 8.01572412s
- Jul 29 16:31:50.525: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:31:52.521: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 10.011849964s
- Jul 29 16:31:52.522: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:31:54.524: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 12.013874206s
- Jul 29 16:31:54.524: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:31:56.525: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 14.015061672s
- Jul 29 16:31:56.525: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:31:58.525: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 16.015347473s
- Jul 29 16:31:58.525: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:32:00.527: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 18.016978177s
- Jul 29 16:32:00.527: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:32:02.529: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=false. Elapsed: 20.018997327s
- Jul 29 16:32:02.529: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = false)
- Jul 29 16:32:04.526: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a": Phase="Running", Reason="", readiness=true. Elapsed: 22.016534682s
- Jul 29 16:32:04.527: INFO: The phase of Pod test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a is Running (Ready = true)
- Jul 29 16:32:04.527: INFO: Pod "test-webserver-d7044dcd-f04b-456a-afd1-1b5a0db12b7a" satisfied condition "running and ready"
- Jul 29 16:32:04.531: INFO: Container started at 2023-07-29 16:31:43 +0000 UTC, pod became ready at 2023-07-29 16:32:02 +0000 UTC
- [AfterEach] [sig-node] Probing container
+ [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
+ test/e2e/common/node/sysctl.go:67
+ [It] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance]
+ test/e2e/common/node/sysctl.go:123
+ STEP: Creating a pod with one valid and two invalid sysctls 08/24/23 12:39:51.905
+ [AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:32:04.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-node] Probing container
+ Aug 24 12:39:51.914: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-node] Probing container
+ [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-node] Probing container
+ [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance]
 tear down framework | framework.go:193
- STEP: Destroying namespace "container-probe-7112" for this suite. 07/29/23 16:32:04.541
+ STEP: Destroying namespace "sysctl-7738" for this suite. 08/24/23 12:39:51.923
 << End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+SS
------------------------------
-[sig-apps] Job
- should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]
- test/e2e/apps/job.go:426
-[BeforeEach] [sig-apps] Job
+[sig-storage] ConfigMap
+ should be consumable from pods in volume [NodeConformance] [Conformance]
+ test/e2e/common/storage/configmap_volume.go:47
+[BeforeEach] [sig-storage] ConfigMap
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:32:04.558
-Jul 29 16:32:04.558: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename job 07/29/23 16:32:04.562
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:04.588
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:04.593
-[BeforeEach] [sig-apps] Job
+STEP: Creating a kubernetes client 08/24/23 12:39:51.94
+Aug 24 12:39:51.940: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename configmap 08/24/23 12:39:51.944
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:51.979
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:51.987
+[BeforeEach] [sig-storage] ConfigMap
 test/e2e/framework/metrics/init/init.go:31
-[It] should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]
- test/e2e/apps/job.go:426
-STEP: Creating a job 07/29/23 16:32:04.597
-STEP: Ensuring job reaches completions 07/29/23 16:32:04.607
-[AfterEach] [sig-apps] Job
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+ test/e2e/common/storage/configmap_volume.go:47
+STEP: Creating configMap with name configmap-test-volume-a56f664d-a517-44a1-bc01-21ebe7255d1b 08/24/23 12:39:51.992
+STEP: Creating a pod to test consume configMaps 08/24/23 12:39:52.001
+Aug 24 12:39:52.018: INFO: Waiting up to 5m0s for pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969" in namespace "configmap-560" to be "Succeeded or Failed"
+Aug 24 12:39:52.023: INFO: Pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969": Phase="Pending", Reason="", readiness=false. Elapsed: 4.78597ms
+Aug 24 12:39:54.032: INFO: Pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013728179s
+Aug 24 12:39:56.031: INFO: Pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013379716s
+STEP: Saw pod success 08/24/23 12:39:56.032
+Aug 24 12:39:56.032: INFO: Pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969" satisfied condition "Succeeded or Failed"
+Aug 24 12:39:56.037: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969 container agnhost-container:
+STEP: delete the pod 08/24/23 12:39:56.048
+Aug 24 12:39:56.066: INFO: Waiting for pod pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969 to disappear
+Aug 24 12:39:56.071: INFO: Pod pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969 no longer exists
+[AfterEach] [sig-storage] ConfigMap
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:32:16.615: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-apps] Job
+Aug 24 12:39:56.071: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-storage] ConfigMap
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-apps] Job
+[DeferCleanup (Each)] [sig-storage] ConfigMap
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-apps] Job
+[DeferCleanup (Each)] [sig-storage] ConfigMap
 tear down framework | framework.go:193
-STEP: Destroying namespace "job-6477" for this suite. 07/29/23 16:32:16.625
+STEP: Destroying namespace "configmap-560" for this suite. 08/24/23 12:39:56.089
------------------------------
-• [SLOW TEST] [12.077 seconds]
-[sig-apps] Job
-test/e2e/apps/framework.go:23
- should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]
- test/e2e/apps/job.go:426
+• [4.162 seconds]
+[sig-storage] ConfigMap
+test/e2e/common/storage/framework.go:23
+ should be consumable from pods in volume [NodeConformance] [Conformance]
+ test/e2e/common/storage/configmap_volume.go:47
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-apps] Job
+ [BeforeEach] [sig-storage] ConfigMap
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:32:04.558
- Jul 29 16:32:04.558: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename job 07/29/23 16:32:04.562
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:04.588
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:04.593
- [BeforeEach] [sig-apps] Job
+ STEP: Creating a kubernetes client 08/24/23 12:39:51.94
+ Aug 24 12:39:51.940: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename configmap 08/24/23 12:39:51.944
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:51.979
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:51.987
+ [BeforeEach] [sig-storage] ConfigMap
 test/e2e/framework/metrics/init/init.go:31
- [It] should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]
- test/e2e/apps/job.go:426
- STEP: Creating a job 07/29/23 16:32:04.597
- STEP: Ensuring job reaches completions 07/29/23 16:32:04.607
- [AfterEach] [sig-apps] Job
+ [It] should be consumable from pods in volume [NodeConformance] [Conformance]
+ test/e2e/common/storage/configmap_volume.go:47
+ STEP: Creating configMap with name configmap-test-volume-a56f664d-a517-44a1-bc01-21ebe7255d1b 08/24/23 12:39:51.992
+ STEP: Creating a pod to test consume configMaps 08/24/23 12:39:52.001
+ Aug 24 12:39:52.018: INFO: Waiting up to 5m0s for pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969" in namespace "configmap-560" to be "Succeeded or Failed"
+ Aug 24 12:39:52.023: INFO: Pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969": Phase="Pending", Reason="", readiness=false. Elapsed: 4.78597ms
+ Aug 24 12:39:54.032: INFO: Pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013728179s
+ Aug 24 12:39:56.031: INFO: Pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013379716s
+ STEP: Saw pod success 08/24/23 12:39:56.032
+ Aug 24 12:39:56.032: INFO: Pod "pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969" satisfied condition "Succeeded or Failed"
+ Aug 24 12:39:56.037: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969 container agnhost-container:
+ STEP: delete the pod 08/24/23 12:39:56.048
+ Aug 24 12:39:56.066: INFO: Waiting for pod pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969 to disappear
+ Aug 24 12:39:56.071: INFO: Pod pod-configmaps-69a6a97e-b8cb-4dad-8901-f46423289969 no longer exists
+ [AfterEach] [sig-storage] ConfigMap
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:32:16.615: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-apps] Job
+ Aug 24 12:39:56.071: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] ConfigMap
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-apps] Job
+ [DeferCleanup (Each)] [sig-storage] ConfigMap
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-apps] Job
+ [DeferCleanup (Each)] [sig-storage] ConfigMap
 tear down framework | framework.go:193
- STEP: Destroying namespace "job-6477" for this suite. 07/29/23 16:32:16.625
+ STEP: Destroying namespace "configmap-560" for this suite. 08/24/23 12:39:56.089
08/24/23 12:39:56.089 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Pods - should support remote command execution over websockets [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:536 -[BeforeEach] [sig-node] Pods +[sig-instrumentation] Events API + should ensure that an event can be fetched, patched, deleted, and listed [Conformance] + test/e2e/instrumentation/events.go:98 +[BeforeEach] [sig-instrumentation] Events API set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:32:16.646 -Jul 29 16:32:16.646: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 16:32:16.648 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:16.678 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:16.682 -[BeforeEach] [sig-node] Pods +STEP: Creating a kubernetes client 08/24/23 12:39:56.108 +Aug 24 12:39:56.108: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename events 08/24/23 12:39:56.11 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:56.143 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:56.148 +[BeforeEach] [sig-instrumentation] Events API test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should support remote command execution over websockets [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:536 -Jul 29 16:32:16.686: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: creating the pod 07/29/23 16:32:16.687 -STEP: submitting the pod to kubernetes 07/29/23 16:32:16.688 -Jul 29 16:32:16.702: INFO: Waiting up to 5m0s for pod "pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824" in namespace "pods-4947" to be "running and ready" -Jul 29 16:32:16.708: INFO: Pod "pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824": Phase="Pending", Reason="", readiness=false. Elapsed: 5.567049ms -Jul 29 16:32:16.708: INFO: The phase of Pod pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:32:18.716: INFO: Pod "pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.013692116s -Jul 29 16:32:18.716: INFO: The phase of Pod pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824 is Running (Ready = true) -Jul 29 16:32:18.716: INFO: Pod "pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824" satisfied condition "running and ready" -[AfterEach] [sig-node] Pods +[BeforeEach] [sig-instrumentation] Events API + test/e2e/instrumentation/events.go:84 +[It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] + test/e2e/instrumentation/events.go:98 +STEP: creating a test event 08/24/23 12:39:56.154 +STEP: listing events in all namespaces 08/24/23 12:39:56.171 +STEP: listing events in test namespace 08/24/23 12:39:56.181 +STEP: listing events with field selection filtering on source 08/24/23 12:39:56.189 +STEP: listing events with field selection filtering on reportingController 08/24/23 12:39:56.199 +STEP: getting the test event 08/24/23 12:39:56.204 +STEP: patching the test event 08/24/23 12:39:56.209 +STEP: getting the test event 08/24/23 12:39:56.227 +STEP: updating the test event 08/24/23 12:39:56.233 +STEP: getting the test event 08/24/23 12:39:56.246 +STEP: deleting the test event 08/24/23 12:39:56.253 +STEP: listing events in all namespaces 08/24/23 12:39:56.27 +STEP: listing events in test namespace 08/24/23 12:39:56.278 +[AfterEach] [sig-instrumentation] Events API test/e2e/framework/node/init/init.go:32 -Jul 29 16:32:18.822: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods +Aug 24 12:39:56.286: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-instrumentation] Events API test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-instrumentation] Events API dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-instrumentation] Events API tear down framework | framework.go:193 -STEP: Destroying namespace "pods-4947" for this suite. 07/29/23 16:32:18.832 +STEP: Destroying namespace "events-4965" for this suite. 
08/24/23 12:39:56.294 ------------------------------ -• [2.198 seconds] -[sig-node] Pods -test/e2e/common/node/framework.go:23 - should support remote command execution over websockets [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:536 +• [0.198 seconds] +[sig-instrumentation] Events API +test/e2e/instrumentation/common/framework.go:23 + should ensure that an event can be fetched, patched, deleted, and listed [Conformance] + test/e2e/instrumentation/events.go:98 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods + [BeforeEach] [sig-instrumentation] Events API set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:32:16.646 - Jul 29 16:32:16.646: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 16:32:16.648 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:16.678 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:16.682 - [BeforeEach] [sig-node] Pods + STEP: Creating a kubernetes client 08/24/23 12:39:56.108 + Aug 24 12:39:56.108: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename events 08/24/23 12:39:56.11 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:56.143 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:56.148 + [BeforeEach] [sig-instrumentation] Events API test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should support remote command execution over websockets [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:536 - Jul 29 16:32:16.686: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: creating the pod 07/29/23 16:32:16.687 - STEP: submitting the pod to kubernetes 07/29/23 16:32:16.688 - Jul 29 16:32:16.702: INFO: Waiting up to 5m0s for pod "pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824" in namespace "pods-4947" to be "running and ready" - Jul 29 16:32:16.708: INFO: Pod "pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824": Phase="Pending", Reason="", readiness=false. Elapsed: 5.567049ms - Jul 29 16:32:16.708: INFO: The phase of Pod pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:32:18.716: INFO: Pod "pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.013692116s - Jul 29 16:32:18.716: INFO: The phase of Pod pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824 is Running (Ready = true) - Jul 29 16:32:18.716: INFO: Pod "pod-exec-websocket-4fa65fcd-d7c2-4cf7-ad38-42c9dc8b8824" satisfied condition "running and ready" - [AfterEach] [sig-node] Pods + [BeforeEach] [sig-instrumentation] Events API + test/e2e/instrumentation/events.go:84 + [It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] + test/e2e/instrumentation/events.go:98 + STEP: creating a test event 08/24/23 12:39:56.154 + STEP: listing events in all namespaces 08/24/23 12:39:56.171 + STEP: listing events in test namespace 08/24/23 12:39:56.181 + STEP: listing events with field selection filtering on source 08/24/23 12:39:56.189 + STEP: listing events with field selection filtering on reportingController 08/24/23 12:39:56.199 + STEP: getting the test event 08/24/23 12:39:56.204 + STEP: patching the test event 08/24/23 12:39:56.209 + STEP: getting the test event 08/24/23 12:39:56.227 + STEP: updating the test event 08/24/23 12:39:56.233 + STEP: getting the test event 08/24/23 12:39:56.246 + STEP: deleting the test event 08/24/23 12:39:56.253 + STEP: listing events in all namespaces 08/24/23 12:39:56.27 + STEP: listing events in test namespace 08/24/23 12:39:56.278 + [AfterEach] [sig-instrumentation] Events API test/e2e/framework/node/init/init.go:32 - Jul 29 16:32:18.822: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods + Aug 24 12:39:56.286: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-instrumentation] Events API test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-instrumentation] Events API dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-instrumentation] Events API tear down framework | framework.go:193 - STEP: Destroying namespace "pods-4947" for this suite. 07/29/23 16:32:18.832 + STEP: Destroying namespace "events-4965" for this suite. 
08/24/23 12:39:56.294 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] CSIInlineVolumes - should support CSIVolumeSource in Pod API [Conformance] - test/e2e/storage/csi_inline.go:131 -[BeforeEach] [sig-storage] CSIInlineVolumes +[sig-node] RuntimeClass + should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:104 +[BeforeEach] [sig-node] RuntimeClass set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:32:18.873 -Jul 29 16:32:18.874: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename csiinlinevolumes 07/29/23 16:32:18.876 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:18.933 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:18.938 -[BeforeEach] [sig-storage] CSIInlineVolumes +STEP: Creating a kubernetes client 08/24/23 12:39:56.307 +Aug 24 12:39:56.307: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename runtimeclass 08/24/23 12:39:56.309 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:56.345 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:56.349 +[BeforeEach] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:31 -[It] should support CSIVolumeSource in Pod API [Conformance] - test/e2e/storage/csi_inline.go:131 -STEP: creating 07/29/23 16:32:18.943 -STEP: getting 07/29/23 16:32:18.974 -STEP: listing in namespace 07/29/23 16:32:18.986 -STEP: patching 07/29/23 16:32:18.991 -STEP: deleting 07/29/23 16:32:19.004 -[AfterEach] [sig-storage] CSIInlineVolumes +[It] should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:104 +Aug 24 12:39:56.374: INFO: Waiting up to 1m20s for at least 1 pods in namespace runtimeclass-3558 to be scheduled +Aug 24 12:39:56.381: INFO: 1 pods are not scheduled: [runtimeclass-3558/test-runtimeclass-runtimeclass-3558-preconfigured-handler-7j76d(9e546da6-9518-4a99-87f8-b962a2977938)] +[AfterEach] [sig-node] RuntimeClass test/e2e/framework/node/init/init.go:32 -Jul 29 16:32:19.018: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes +Aug 24 12:39:58.401: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes +[DeferCleanup (Each)] [sig-node] RuntimeClass dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes +[DeferCleanup (Each)] [sig-node] RuntimeClass tear down framework | framework.go:193 -STEP: Destroying namespace "csiinlinevolumes-1533" for this suite. 07/29/23 16:32:19.024 +STEP: Destroying namespace "runtimeclass-3558" for this suite. 
08/24/23 12:39:58.412 ------------------------------ -• [0.159 seconds] -[sig-storage] CSIInlineVolumes -test/e2e/storage/utils/framework.go:23 - should support CSIVolumeSource in Pod API [Conformance] - test/e2e/storage/csi_inline.go:131 +• [2.117 seconds] +[sig-node] RuntimeClass +test/e2e/common/node/framework.go:23 + should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:104 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] CSIInlineVolumes + [BeforeEach] [sig-node] RuntimeClass set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:32:18.873 - Jul 29 16:32:18.874: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename csiinlinevolumes 07/29/23 16:32:18.876 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:18.933 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:18.938 - [BeforeEach] [sig-storage] CSIInlineVolumes + STEP: Creating a kubernetes client 08/24/23 12:39:56.307 + Aug 24 12:39:56.307: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename runtimeclass 08/24/23 12:39:56.309 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:56.345 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:56.349 + [BeforeEach] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:31 - [It] should support CSIVolumeSource in Pod API [Conformance] - test/e2e/storage/csi_inline.go:131 - STEP: creating 07/29/23 16:32:18.943 - STEP: getting 07/29/23 16:32:18.974 - STEP: listing in namespace 07/29/23 16:32:18.986 - STEP: patching 07/29/23 16:32:18.991 - STEP: deleting 07/29/23 16:32:19.004 - [AfterEach] [sig-storage] CSIInlineVolumes + [It] should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:104 + Aug 24 12:39:56.374: INFO: Waiting up to 1m20s for at least 1 pods in namespace runtimeclass-3558 to be scheduled + Aug 24 12:39:56.381: INFO: 1 pods are not scheduled: [runtimeclass-3558/test-runtimeclass-runtimeclass-3558-preconfigured-handler-7j76d(9e546da6-9518-4a99-87f8-b962a2977938)] + [AfterEach] [sig-node] RuntimeClass test/e2e/framework/node/init/init.go:32 - Jul 29 16:32:19.018: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes + Aug 24 12:39:58.401: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes + [DeferCleanup (Each)] [sig-node] RuntimeClass dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes + [DeferCleanup (Each)] [sig-node] RuntimeClass tear down framework | framework.go:193 - STEP: Destroying namespace "csiinlinevolumes-1533" for this suite. 07/29/23 16:32:19.024 + STEP: Destroying namespace "runtimeclass-3558" for this suite. 
08/24/23 12:39:58.412 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Downward API volume - should provide container's cpu request [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:221 -[BeforeEach] [sig-storage] Downward API volume +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a secret. [Conformance] + test/e2e/apimachinery/resource_quota.go:160 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:32:19.04 -Jul 29 16:32:19.041: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:32:19.043 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:19.078 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:19.083 -[BeforeEach] [sig-storage] Downward API volume +STEP: Creating a kubernetes client 08/24/23 12:39:58.432 +Aug 24 12:39:58.433: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:39:58.436 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:58.47 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:58.476 +[BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 -[It] should provide container's cpu request [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:221 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:32:19.088 -Jul 29 16:32:19.103: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4" in namespace "downward-api-6948" to be "Succeeded or Failed" -Jul 29 16:32:19.108: INFO: Pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4": Phase="Pending", Reason="", readiness=false. Elapsed: 4.635727ms -Jul 29 16:32:21.116: INFO: Pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012553219s -Jul 29 16:32:23.120: INFO: Pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016602821s -STEP: Saw pod success 07/29/23 16:32:23.12 -Jul 29 16:32:23.121: INFO: Pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4" satisfied condition "Succeeded or Failed" -Jul 29 16:32:23.128: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4 container client-container: -STEP: delete the pod 07/29/23 16:32:23.152 -Jul 29 16:32:23.179: INFO: Waiting for pod downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4 to disappear -Jul 29 16:32:23.184: INFO: Pod downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4 no longer exists -[AfterEach] [sig-storage] Downward API volume +[It] should create a ResourceQuota and capture the life of a secret. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:160 +STEP: Discovering how many secrets are in namespace by default 08/24/23 12:39:58.482 +STEP: Counting existing ResourceQuota 08/24/23 12:40:03.49 +STEP: Creating a ResourceQuota 08/24/23 12:40:08.498 +STEP: Ensuring resource quota status is calculated 08/24/23 12:40:08.508 +STEP: Creating a Secret 08/24/23 12:40:10.517 +STEP: Ensuring resource quota status captures secret creation 08/24/23 12:40:10.549 +STEP: Deleting a secret 08/24/23 12:40:12.556 +STEP: Ensuring resource quota status released usage 08/24/23 12:40:12.567 +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:32:23.185: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume +Aug 24 12:40:14.574: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-6948" for this suite. 07/29/23 16:32:23.194 +STEP: Destroying namespace "resourcequota-7362" for this suite. 08/24/23 12:40:14.582 ------------------------------ -• [4.169 seconds] -[sig-storage] Downward API volume -test/e2e/common/storage/framework.go:23 - should provide container's cpu request [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:221 +• [SLOW TEST] [16.161 seconds] +[sig-api-machinery] ResourceQuota +test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a secret. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:160 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:32:19.04 - Jul 29 16:32:19.041: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:32:19.043 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:19.078 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:19.083 - [BeforeEach] [sig-storage] Downward API volume + STEP: Creating a kubernetes client 08/24/23 12:39:58.432 + Aug 24 12:39:58.433: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:39:58.436 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:39:58.47 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:39:58.476 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 - [It] should provide container's cpu request [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:221 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:32:19.088 - Jul 29 16:32:19.103: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4" in namespace "downward-api-6948" to be "Succeeded or Failed" - Jul 29 16:32:19.108: INFO: Pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4": Phase="Pending", Reason="", readiness=false. Elapsed: 4.635727ms - Jul 29 16:32:21.116: INFO: Pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012553219s - Jul 29 16:32:23.120: INFO: Pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016602821s - STEP: Saw pod success 07/29/23 16:32:23.12 - Jul 29 16:32:23.121: INFO: Pod "downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4" satisfied condition "Succeeded or Failed" - Jul 29 16:32:23.128: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4 container client-container: - STEP: delete the pod 07/29/23 16:32:23.152 - Jul 29 16:32:23.179: INFO: Waiting for pod downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4 to disappear - Jul 29 16:32:23.184: INFO: Pod downwardapi-volume-c1c31d40-aae7-4bfd-8b18-972187d129f4 no longer exists - [AfterEach] [sig-storage] Downward API volume + [It] should create a ResourceQuota and capture the life of a secret. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:160 + STEP: Discovering how many secrets are in namespace by default 08/24/23 12:39:58.482 + STEP: Counting existing ResourceQuota 08/24/23 12:40:03.49 + STEP: Creating a ResourceQuota 08/24/23 12:40:08.498 + STEP: Ensuring resource quota status is calculated 08/24/23 12:40:08.508 + STEP: Creating a Secret 08/24/23 12:40:10.517 + STEP: Ensuring resource quota status captures secret creation 08/24/23 12:40:10.549 + STEP: Deleting a secret 08/24/23 12:40:12.556 + STEP: Ensuring resource quota status released usage 08/24/23 12:40:12.567 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:32:23.185: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume + Aug 24 12:40:14.574: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-6948" for this suite. 07/29/23 16:32:23.194 + STEP: Destroying namespace "resourcequota-7362" for this suite. 08/24/23 12:40:14.582 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - works for multiple CRDs of same group and version but different kinds [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:357 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[sig-cli] Kubectl client Kubectl describe + should check if kubectl describe prints relevant information for rc and pods [Conformance] + test/e2e/kubectl/kubectl.go:1276 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:32:23.213 -Jul 29 16:32:23.214: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:32:23.216 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:23.245 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:23.251 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:40:14.599 +Aug 24 12:40:14.599: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:40:14.601 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:40:14.632 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:40:14.638 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[It] works for multiple CRDs of same group and version but different kinds [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:357 -STEP: CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation 07/29/23 16:32:23.256 -Jul 29 16:32:23.257: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:32:26.270: INFO: >>> kubeConfig: 
/tmp/kubeconfig-3177299396 -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[It] should check if kubectl describe prints relevant information for rc and pods [Conformance] + test/e2e/kubectl/kubectl.go:1276 +Aug 24 12:40:14.644: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 create -f -' +Aug 24 12:40:15.846: INFO: stderr: "" +Aug 24 12:40:15.846: INFO: stdout: "replicationcontroller/agnhost-primary created\n" +Aug 24 12:40:15.846: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 create -f -' +Aug 24 12:40:16.367: INFO: stderr: "" +Aug 24 12:40:16.367: INFO: stdout: "service/agnhost-primary created\n" +STEP: Waiting for Agnhost primary to start. 08/24/23 12:40:16.367 +Aug 24 12:40:17.377: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:40:17.377: INFO: Found 0 / 1 +Aug 24 12:40:18.376: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:40:18.376: INFO: Found 1 / 1 +Aug 24 12:40:18.376: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +Aug 24 12:40:18.383: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:40:18.383: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +Aug 24 12:40:18.383: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 describe pod agnhost-primary-t2225' +Aug 24 12:40:18.560: INFO: stderr: "" +Aug 24 12:40:18.560: INFO: stdout: "Name: agnhost-primary-t2225\nNamespace: kubectl-5355\nPriority: 0\nService Account: default\nNode: pe9deep4seen-3/192.168.121.130\nStart Time: Thu, 24 Aug 2023 12:40:15 +0000\nLabels: app=agnhost\n role=primary\nAnnotations: \nStatus: Running\nIP: 10.233.66.147\nIPs:\n IP: 10.233.66.147\nControlled By: ReplicationController/agnhost-primary\nContainers:\n agnhost-primary:\n Container ID: cri-o://f8ed2548f861d05aa4a5a7983549b4ae1a79639e7b7b85fe8b08c6f77416549f\n Image: registry.k8s.io/e2e-test-images/agnhost:2.43\n Image ID: registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Thu, 24 Aug 2023 12:40:16 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-d48tw (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n kube-api-access-d48tw:\n Type: Projected (a volume that contains injected data from multiple sources)\n TokenExpirationSeconds: 3607\n ConfigMapName: kube-root-ca.crt\n ConfigMapOptional: \n DownwardAPI: true\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 3s default-scheduler Successfully assigned kubectl-5355/agnhost-primary-t2225 to pe9deep4seen-3\n Normal Pulled 2s kubelet Container image \"registry.k8s.io/e2e-test-images/agnhost:2.43\" already present on machine\n Normal Created 2s kubelet Created container agnhost-primary\n Normal Started 2s kubelet Started container agnhost-primary\n" +Aug 24 12:40:18.561: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 
describe rc agnhost-primary' +Aug 24 12:40:18.725: INFO: stderr: "" +Aug 24 12:40:18.725: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-5355\nSelector: app=agnhost,role=primary\nLabels: app=agnhost\n role=primary\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=agnhost\n role=primary\n Containers:\n agnhost-primary:\n Image: registry.k8s.io/e2e-test-images/agnhost:2.43\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 3s replication-controller Created pod: agnhost-primary-t2225\n" +Aug 24 12:40:18.726: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 describe service agnhost-primary' +Aug 24 12:40:18.907: INFO: stderr: "" +Aug 24 12:40:18.907: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-5355\nLabels: app=agnhost\n role=primary\nAnnotations: \nSelector: app=agnhost,role=primary\nType: ClusterIP\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.233.41.156\nIPs: 10.233.41.156\nPort: 6379/TCP\nTargetPort: agnhost-server/TCP\nEndpoints: 10.233.66.147:6379\nSession Affinity: None\nEvents: \n" +Aug 24 12:40:18.920: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 describe node pe9deep4seen-1' +Aug 24 12:40:19.165: INFO: stderr: "" +Aug 24 12:40:19.166: INFO: stdout: "Name: pe9deep4seen-1\nRoles: control-plane\nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/os=linux\n kubernetes.io/arch=amd64\n kubernetes.io/hostname=pe9deep4seen-1\n kubernetes.io/os=linux\n node-role.kubernetes.io/control-plane=\n node.kubernetes.io/exclude-from-external-load-balancers=\nAnnotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock\n node.alpha.kubernetes.io/ttl: 0\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Thu, 24 Aug 2023 11:21:05 +0000\nTaints: \nUnschedulable: false\nLease:\n HolderIdentity: pe9deep4seen-1\n AcquireTime: \n RenewTime: Thu, 24 Aug 2023 12:40:16 +0000\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n NetworkUnavailable False Thu, 24 Aug 2023 11:24:04 +0000 Thu, 24 Aug 2023 11:24:04 +0000 CiliumIsUp Cilium is running on this node\n MemoryPressure False Thu, 24 Aug 2023 12:39:34 +0000 Thu, 24 Aug 2023 11:20:56 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Thu, 24 Aug 2023 12:39:34 +0000 Thu, 24 Aug 2023 11:20:56 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Thu, 24 Aug 2023 12:39:34 +0000 Thu, 24 Aug 2023 11:20:56 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Thu, 24 Aug 2023 12:39:34 +0000 Thu, 24 Aug 2023 11:25:00 +0000 KubeletReady kubelet is posting ready status. 
AppArmor enabled\nAddresses:\n InternalIP: 192.168.121.127\n Hostname: pe9deep4seen-1\nCapacity:\n cpu: 2\n ephemeral-storage: 115008636Ki\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 8123904Ki\n pods: 110\nAllocatable:\n cpu: 1600m\n ephemeral-storage: 111880401014\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 3274240Ki\n pods: 110\nSystem Info:\n Machine ID: 37a86d9f1ef842faaad122c27e6df1d5\n System UUID: 37a86d9f-1ef8-42fa-aad1-22c27e6df1d5\n Boot ID: c5ba4714-e129-4247-86e3-50c9d0a0b6ad\n Kernel Version: 6.2.0-26-generic\n OS Image: Ubuntu 22.04.3 LTS\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: cri-o://1.26.4\n Kubelet Version: v1.26.8\n Kube-Proxy Version: v1.26.8\nPodCIDR: 10.233.64.0/24\nPodCIDRs: 10.233.64.0/24\nNon-terminated Pods: (10 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age\n --------- ---- ------------ ---------- --------------- ------------- ---\n kube-system cilium-node-init-wqpdx 100m (6%) 0 (0%) 100Mi (3%) 0 (0%) 77m\n kube-system cilium-wpzgb 100m (6%) 0 (0%) 100Mi (3%) 0 (0%) 77m\n kube-system coredns-787d4945fb-8jnm5 100m (6%) 0 (0%) 70Mi (2%) 170Mi (5%) 76m\n kube-system coredns-787d4945fb-d76z6 100m (6%) 0 (0%) 70Mi (2%) 170Mi (5%) 76m\n kube-system kube-addon-manager-pe9deep4seen-1 5m (0%) 0 (0%) 50Mi (1%) 0 (0%) 77m\n kube-system kube-apiserver-pe9deep4seen-1 250m (15%) 0 (0%) 0 (0%) 0 (0%) 79m\n kube-system kube-controller-manager-pe9deep4seen-1 200m (12%) 0 (0%) 0 (0%) 0 (0%) 79m\n kube-system kube-proxy-nr5bs 0 (0%) 0 (0%) 0 (0%) 0 (0%) 78m\n kube-system kube-scheduler-pe9deep4seen-1 100m (6%) 0 (0%) 0 (0%) 0 (0%) 79m\n sonobuoy sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw 0 (0%) 0 (0%) 0 (0%) 0 (0%) 61m\nAllocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n Resource Requests Limits\n -------- -------- ------\n cpu 955m (59%) 0 (0%)\n memory 390Mi (12%) 340Mi (10%)\n ephemeral-storage 0 (0%) 0 (0%)\n hugepages-1Gi 0 (0%) 0 (0%)\n hugepages-2Mi 0 (0%) 0 (0%)\nEvents: \n" +Aug 24 12:40:19.166: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 describe namespace kubectl-5355' +Aug 24 12:40:19.335: INFO: stderr: "" +Aug 24 12:40:19.335: INFO: stdout: "Name: kubectl-5355\nLabels: e2e-framework=kubectl\n e2e-run=e37f2036-3a54-4653-ada1-c01489d8d1f1\n kubernetes.io/metadata.name=kubectl-5355\n pod-security.kubernetes.io/enforce=baseline\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo LimitRange resource.\n" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 16:32:35.799: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +Aug 24 12:40:19.335: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "crd-publish-openapi-6335" for this suite. 07/29/23 16:32:35.816 +STEP: Destroying namespace "kubectl-5355" for this suite. 
08/24/23 12:40:19.345 ------------------------------ -• [SLOW TEST] [12.616 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - works for multiple CRDs of same group and version but different kinds [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:357 +• [4.770 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Kubectl describe + test/e2e/kubectl/kubectl.go:1270 + should check if kubectl describe prints relevant information for rc and pods [Conformance] + test/e2e/kubectl/kubectl.go:1276 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:32:23.213 - Jul 29 16:32:23.214: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:32:23.216 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:23.245 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:23.251 - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:40:14.599 + Aug 24 12:40:14.599: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:40:14.601 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:40:14.632 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:40:14.638 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [It] works for multiple CRDs of same group and version but different kinds [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:357 - STEP: CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation 07/29/23 16:32:23.256 - Jul 29 16:32:23.257: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:32:26.270: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [It] should check if kubectl describe prints relevant information for rc and pods [Conformance] + test/e2e/kubectl/kubectl.go:1276 + Aug 24 12:40:14.644: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 create -f -' + Aug 24 12:40:15.846: INFO: stderr: "" + Aug 24 12:40:15.846: INFO: stdout: "replicationcontroller/agnhost-primary created\n" + Aug 24 12:40:15.846: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 create -f -' + Aug 24 12:40:16.367: INFO: stderr: "" + Aug 24 12:40:16.367: INFO: stdout: "service/agnhost-primary created\n" + STEP: Waiting for Agnhost primary to start. 08/24/23 12:40:16.367 + Aug 24 12:40:17.377: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:40:17.377: INFO: Found 0 / 1 + Aug 24 12:40:18.376: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:40:18.376: INFO: Found 1 / 1 + Aug 24 12:40:18.376: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 + Aug 24 12:40:18.383: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:40:18.383: INFO: ForEach: Found 1 pods from the filter. 
Now looping through them. + Aug 24 12:40:18.383: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 describe pod agnhost-primary-t2225' + Aug 24 12:40:18.560: INFO: stderr: "" + Aug 24 12:40:18.560: INFO: stdout: "Name: agnhost-primary-t2225\nNamespace: kubectl-5355\nPriority: 0\nService Account: default\nNode: pe9deep4seen-3/192.168.121.130\nStart Time: Thu, 24 Aug 2023 12:40:15 +0000\nLabels: app=agnhost\n role=primary\nAnnotations: \nStatus: Running\nIP: 10.233.66.147\nIPs:\n IP: 10.233.66.147\nControlled By: ReplicationController/agnhost-primary\nContainers:\n agnhost-primary:\n Container ID: cri-o://f8ed2548f861d05aa4a5a7983549b4ae1a79639e7b7b85fe8b08c6f77416549f\n Image: registry.k8s.io/e2e-test-images/agnhost:2.43\n Image ID: registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Thu, 24 Aug 2023 12:40:16 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-d48tw (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n kube-api-access-d48tw:\n Type: Projected (a volume that contains injected data from multiple sources)\n TokenExpirationSeconds: 3607\n ConfigMapName: kube-root-ca.crt\n ConfigMapOptional: \n DownwardAPI: true\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 3s default-scheduler Successfully assigned kubectl-5355/agnhost-primary-t2225 to pe9deep4seen-3\n Normal Pulled 2s kubelet Container image \"registry.k8s.io/e2e-test-images/agnhost:2.43\" already present on machine\n Normal Created 2s kubelet Created container agnhost-primary\n Normal Started 2s kubelet Started container agnhost-primary\n" + Aug 24 12:40:18.561: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 describe rc agnhost-primary' + Aug 24 12:40:18.725: INFO: stderr: "" + Aug 24 12:40:18.725: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-5355\nSelector: app=agnhost,role=primary\nLabels: app=agnhost\n role=primary\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=agnhost\n role=primary\n Containers:\n agnhost-primary:\n Image: registry.k8s.io/e2e-test-images/agnhost:2.43\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 3s replication-controller Created pod: agnhost-primary-t2225\n" + Aug 24 12:40:18.726: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 describe service agnhost-primary' + Aug 24 12:40:18.907: INFO: stderr: "" + Aug 24 12:40:18.907: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-5355\nLabels: app=agnhost\n role=primary\nAnnotations: \nSelector: app=agnhost,role=primary\nType: ClusterIP\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.233.41.156\nIPs: 10.233.41.156\nPort: 6379/TCP\nTargetPort: agnhost-server/TCP\nEndpoints: 10.233.66.147:6379\nSession Affinity: None\nEvents: \n" + Aug 24 12:40:18.920: INFO: Running 
'/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 describe node pe9deep4seen-1' + Aug 24 12:40:19.165: INFO: stderr: "" + Aug 24 12:40:19.166: INFO: stdout: "Name: pe9deep4seen-1\nRoles: control-plane\nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/os=linux\n kubernetes.io/arch=amd64\n kubernetes.io/hostname=pe9deep4seen-1\n kubernetes.io/os=linux\n node-role.kubernetes.io/control-plane=\n node.kubernetes.io/exclude-from-external-load-balancers=\nAnnotations: kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/crio/crio.sock\n node.alpha.kubernetes.io/ttl: 0\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Thu, 24 Aug 2023 11:21:05 +0000\nTaints: \nUnschedulable: false\nLease:\n HolderIdentity: pe9deep4seen-1\n AcquireTime: \n RenewTime: Thu, 24 Aug 2023 12:40:16 +0000\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n NetworkUnavailable False Thu, 24 Aug 2023 11:24:04 +0000 Thu, 24 Aug 2023 11:24:04 +0000 CiliumIsUp Cilium is running on this node\n MemoryPressure False Thu, 24 Aug 2023 12:39:34 +0000 Thu, 24 Aug 2023 11:20:56 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Thu, 24 Aug 2023 12:39:34 +0000 Thu, 24 Aug 2023 11:20:56 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Thu, 24 Aug 2023 12:39:34 +0000 Thu, 24 Aug 2023 11:20:56 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Thu, 24 Aug 2023 12:39:34 +0000 Thu, 24 Aug 2023 11:25:00 +0000 KubeletReady kubelet is posting ready status. AppArmor enabled\nAddresses:\n InternalIP: 192.168.121.127\n Hostname: pe9deep4seen-1\nCapacity:\n cpu: 2\n ephemeral-storage: 115008636Ki\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 8123904Ki\n pods: 110\nAllocatable:\n cpu: 1600m\n ephemeral-storage: 111880401014\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 3274240Ki\n pods: 110\nSystem Info:\n Machine ID: 37a86d9f1ef842faaad122c27e6df1d5\n System UUID: 37a86d9f-1ef8-42fa-aad1-22c27e6df1d5\n Boot ID: c5ba4714-e129-4247-86e3-50c9d0a0b6ad\n Kernel Version: 6.2.0-26-generic\n OS Image: Ubuntu 22.04.3 LTS\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: cri-o://1.26.4\n Kubelet Version: v1.26.8\n Kube-Proxy Version: v1.26.8\nPodCIDR: 10.233.64.0/24\nPodCIDRs: 10.233.64.0/24\nNon-terminated Pods: (10 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age\n --------- ---- ------------ ---------- --------------- ------------- ---\n kube-system cilium-node-init-wqpdx 100m (6%) 0 (0%) 100Mi (3%) 0 (0%) 77m\n kube-system cilium-wpzgb 100m (6%) 0 (0%) 100Mi (3%) 0 (0%) 77m\n kube-system coredns-787d4945fb-8jnm5 100m (6%) 0 (0%) 70Mi (2%) 170Mi (5%) 76m\n kube-system coredns-787d4945fb-d76z6 100m (6%) 0 (0%) 70Mi (2%) 170Mi (5%) 76m\n kube-system kube-addon-manager-pe9deep4seen-1 5m (0%) 0 (0%) 50Mi (1%) 0 (0%) 77m\n kube-system kube-apiserver-pe9deep4seen-1 250m (15%) 0 (0%) 0 (0%) 0 (0%) 79m\n kube-system kube-controller-manager-pe9deep4seen-1 200m (12%) 0 (0%) 0 (0%) 0 (0%) 79m\n kube-system kube-proxy-nr5bs 0 (0%) 0 (0%) 0 (0%) 0 (0%) 78m\n kube-system kube-scheduler-pe9deep4seen-1 100m (6%) 0 (0%) 0 (0%) 0 (0%) 79m\n sonobuoy sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw 0 (0%) 0 (0%) 0 (0%) 0 (0%) 61m\nAllocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n Resource 
Requests Limits\n -------- -------- ------\n cpu 955m (59%) 0 (0%)\n memory 390Mi (12%) 340Mi (10%)\n ephemeral-storage 0 (0%) 0 (0%)\n hugepages-1Gi 0 (0%) 0 (0%)\n hugepages-2Mi 0 (0%) 0 (0%)\nEvents: \n" + Aug 24 12:40:19.166: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5355 describe namespace kubectl-5355' + Aug 24 12:40:19.335: INFO: stderr: "" + Aug 24 12:40:19.335: INFO: stdout: "Name: kubectl-5355\nLabels: e2e-framework=kubectl\n e2e-run=e37f2036-3a54-4653-ada1-c01489d8d1f1\n kubernetes.io/metadata.name=kubectl-5355\n pod-security.kubernetes.io/enforce=baseline\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo LimitRange resource.\n" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 16:32:35.799: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + Aug 24 12:40:19.335: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "crd-publish-openapi-6335" for this suite. 07/29/23 16:32:35.816 + STEP: Destroying namespace "kubectl-5355" for this suite. 08/24/23 12:40:19.345 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +S ------------------------------ -[sig-network] Proxy version v1 - should proxy through a service and a pod [Conformance] - test/e2e/network/proxy.go:101 -[BeforeEach] version v1 +[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints + verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] + test/e2e/scheduling/preemption.go:814 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:40:19.37 +Aug 24 12:40:19.370: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename sched-preemption 08/24/23 12:40:19.376 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:40:19.426 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:40:19.431 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:97 +Aug 24 12:40:19.469: INFO: Waiting up to 1m0s for all nodes to be ready +Aug 24 12:41:19.529: INFO: Waiting for terminating namespaces to be deleted... 
+[BeforeEach] PriorityClass endpoints set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:32:35.832 -Jul 29 16:32:35.832: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename proxy 07/29/23 16:32:35.834 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:35.916 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:35.923 -[BeforeEach] version v1 +STEP: Creating a kubernetes client 08/24/23 12:41:19.536 +Aug 24 12:41:19.536: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename sched-preemption-path 08/24/23 12:41:19.538 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:19.598 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:19.604 +[BeforeEach] PriorityClass endpoints test/e2e/framework/metrics/init/init.go:31 -[It] should proxy through a service and a pod [Conformance] - test/e2e/network/proxy.go:101 -STEP: starting an echo server on multiple ports 07/29/23 16:32:35.954 -STEP: creating replication controller proxy-service-68xg5 in namespace proxy-7213 07/29/23 16:32:35.954 -I0729 16:32:35.977151 13 runners.go:193] Created replication controller with name: proxy-service-68xg5, namespace: proxy-7213, replica count: 1 -I0729 16:32:37.029235 13 runners.go:193] proxy-service-68xg5 Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -I0729 16:32:38.030373 13 runners.go:193] proxy-service-68xg5 Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Jul 29 16:32:38.039: INFO: setup took 2.110063719s, starting test cases -STEP: running 16 cases, 20 attempts per case, 320 total attempts 07/29/23 16:32:38.039 -Jul 29 16:32:38.061: INFO: (0) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 19.996151ms) -Jul 29 16:32:38.067: INFO: (0) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 25.983211ms) -Jul 29 16:32:38.068: INFO: (0) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 28.318311ms) -Jul 29 16:32:38.074: INFO: (0) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 33.675477ms) -Jul 29 16:32:38.074: INFO: (0) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 31.708547ms) -Jul 29 16:32:38.075: INFO: (0) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 32.271334ms) -Jul 29 16:32:38.076: INFO: (0) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 34.33376ms) -Jul 29 16:32:38.077: INFO: (0) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 34.629799ms) -Jul 29 16:32:38.077: INFO: (0) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 36.300477ms) -Jul 29 16:32:38.077: INFO: (0) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 35.536541ms) -Jul 29 16:32:38.083: INFO: (0) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 42.336354ms) -Jul 29 16:32:38.084: INFO: (0) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 40.343248ms) -Jul 29 16:32:38.084: INFO: (0) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 43.839986ms) -Jul 29 16:32:38.084: INFO: (0) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 42.578167ms) -Jul 29 16:32:38.085: INFO: (0) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 41.940973ms) -Jul 29 16:32:38.086: INFO: (0) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 20.210314ms) -Jul 29 16:32:38.108: INFO: (1) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 20.618224ms) -Jul 29 16:32:38.108: INFO: (1) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 19.888169ms) -Jul 29 16:32:38.110: INFO: (1) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 22.146402ms) -Jul 29 16:32:38.110: INFO: (1) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 23.420182ms) -Jul 29 16:32:38.112: INFO: (1) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 25.056807ms) -Jul 29 16:32:38.114: INFO: (1) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 27.632571ms) -Jul 29 16:32:38.118: INFO: (1) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 30.777474ms) -Jul 29 16:32:38.118: INFO: (1) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 30.325743ms) -Jul 29 16:32:38.119: INFO: (1) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 29.761579ms) -Jul 29 16:32:38.119: INFO: (1) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 18.305499ms) -Jul 29 16:32:38.143: INFO: (2) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 19.146402ms) -Jul 29 16:32:38.148: INFO: (2) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 23.227503ms) -Jul 29 16:32:38.149: INFO: (2) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 23.218783ms) -Jul 29 16:32:38.149: INFO: (2) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 23.275229ms) -Jul 29 16:32:38.152: INFO: (2) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test<... 
(200; 28.123768ms) -Jul 29 16:32:38.153: INFO: (2) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 27.301827ms) -Jul 29 16:32:38.153: INFO: (2) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 28.324763ms) -Jul 29 16:32:38.153: INFO: (2) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 29.436572ms) -Jul 29 16:32:38.158: INFO: (2) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 32.498911ms) -Jul 29 16:32:38.158: INFO: (2) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 32.75343ms) -Jul 29 16:32:38.166: INFO: (3) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 7.370553ms) -Jul 29 16:32:38.180: INFO: (3) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 21.079944ms) -Jul 29 16:32:38.181: INFO: (3) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 20.151911ms) -Jul 29 16:32:38.180: INFO: (3) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 22.0371ms) -Jul 29 16:32:38.181: INFO: (3) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 20.665373ms) -Jul 29 16:32:38.183: INFO: (3) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 23.598603ms) -Jul 29 16:32:38.185: INFO: (3) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 22.840135ms) -Jul 29 16:32:38.185: INFO: (3) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 25.65389ms) -Jul 29 16:32:38.186: INFO: (3) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 28.151786ms) -Jul 29 16:32:38.187: INFO: (3) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 26.190363ms) -Jul 29 16:32:38.189: INFO: (3) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 26.687297ms) -Jul 29 16:32:38.189: INFO: (3) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 25.950447ms) -Jul 29 16:32:38.189: INFO: (3) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 29.658069ms) -Jul 29 16:32:38.190: INFO: (3) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 29.382829ms) -Jul 29 16:32:38.195: INFO: (3) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 34.54882ms) -Jul 29 16:32:38.211: INFO: (4) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 15.300727ms) -Jul 29 16:32:38.214: INFO: (4) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 18.397157ms) -Jul 29 16:32:38.214: INFO: (4) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 17.831575ms) -Jul 29 16:32:38.214: INFO: (4) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 18.792654ms) -Jul 29 16:32:38.215: INFO: (4) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 18.806711ms) -Jul 29 16:32:38.220: INFO: (4) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 23.892525ms) -Jul 29 16:32:38.220: INFO: (4) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 24.416276ms) -Jul 29 16:32:38.221: INFO: (4) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 24.604502ms) -Jul 29 16:32:38.221: INFO: (4) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 24.323787ms) -Jul 29 16:32:38.221: INFO: (4) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 24.982048ms) -Jul 29 16:32:38.221: INFO: (4) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 25.079856ms) -Jul 29 16:32:38.223: INFO: (4) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 26.50332ms) -Jul 29 16:32:38.223: INFO: (4) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 26.702388ms) -Jul 29 16:32:38.223: INFO: (4) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 27.232555ms) -Jul 29 16:32:38.235: INFO: (5) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 11.070774ms) -Jul 29 16:32:38.235: INFO: (5) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 11.47216ms) -Jul 29 16:32:38.236: INFO: (5) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 11.847249ms) -Jul 29 16:32:38.238: INFO: (5) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 13.273147ms) -Jul 29 16:32:38.238: INFO: (5) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 14.638505ms) -Jul 29 16:32:38.243: INFO: (5) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 18.694251ms) -Jul 29 16:32:38.244: INFO: (5) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 19.661021ms) -Jul 29 16:32:38.247: INFO: (5) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 22.351168ms) -Jul 29 16:32:38.247: INFO: (5) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 22.522375ms) -Jul 29 16:32:38.251: INFO: (5) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 27.379255ms) -Jul 29 16:32:38.252: INFO: (5) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 26.90698ms) -Jul 29 16:32:38.252: INFO: (5) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 28.173772ms) -Jul 29 16:32:38.252: INFO: (5) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 27.513213ms) -Jul 29 16:32:38.253: INFO: (5) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test<... (200; 18.641101ms) -Jul 29 16:32:38.275: INFO: (6) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 18.371055ms) -Jul 29 16:32:38.276: INFO: (6) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 18.679446ms) -Jul 29 16:32:38.276: INFO: (6) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 19.860368ms) -Jul 29 16:32:38.276: INFO: (6) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 20.067863ms) -Jul 29 16:32:38.277: INFO: (6) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 20.524039ms) -Jul 29 16:32:38.279: INFO: (6) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 45.705664ms) -Jul 29 16:32:38.332: INFO: (7) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 47.116552ms) -Jul 29 16:32:38.338: INFO: (7) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 51.894699ms) -Jul 29 16:32:38.338: INFO: (7) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 53.242561ms) -Jul 29 16:32:38.338: INFO: (7) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 53.165928ms) -Jul 29 16:32:38.338: INFO: (7) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 51.186611ms) -Jul 29 16:32:38.339: INFO: (7) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 52.103114ms) -Jul 29 16:32:38.341: INFO: (7) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 54.061722ms) -Jul 29 16:32:38.342: INFO: (7) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 55.561833ms) -Jul 29 16:32:38.342: INFO: (7) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 56.170381ms) -Jul 29 16:32:38.342: INFO: (7) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 56.904539ms) -Jul 29 16:32:38.343: INFO: (7) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 57.357636ms) -Jul 29 16:32:38.343: INFO: (7) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 57.706114ms) -Jul 29 16:32:38.343: INFO: (7) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 57.345871ms) -Jul 29 16:32:38.344: INFO: (7) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 56.789962ms) -Jul 29 16:32:38.357: INFO: (8) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 11.652892ms) -Jul 29 16:32:38.358: INFO: (8) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 20.21156ms) -Jul 29 16:32:38.365: INFO: (8) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 19.48274ms) -Jul 29 16:32:38.365: INFO: (8) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 21.562637ms) -Jul 29 16:32:38.366: INFO: (8) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 21.385685ms) -Jul 29 16:32:38.366: INFO: (8) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 20.934887ms) -Jul 29 16:32:38.366: INFO: (8) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 22.327894ms) -Jul 29 16:32:38.366: INFO: (8) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 21.261937ms) -Jul 29 16:32:38.367: INFO: (8) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 21.112635ms) -Jul 29 16:32:38.371: INFO: (8) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 25.30134ms) -Jul 29 16:32:38.371: INFO: (8) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 25.289483ms) -Jul 29 16:32:38.371: INFO: (8) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 27.362474ms) -Jul 29 16:32:38.372: INFO: (8) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 27.169282ms) -Jul 29 16:32:38.373: INFO: (8) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 27.289121ms) -Jul 29 16:32:38.373: INFO: (8) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 27.466856ms) -Jul 29 16:32:38.388: INFO: (9) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 13.070894ms) -Jul 29 16:32:38.391: INFO: (9) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 15.958927ms) -Jul 29 16:32:38.392: INFO: (9) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 18.149933ms) -Jul 29 16:32:38.392: INFO: (9) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 18.518866ms) -Jul 29 16:32:38.392: INFO: (9) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 18.221312ms) -Jul 29 16:32:38.393: INFO: (9) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 18.544974ms) -Jul 29 16:32:38.397: INFO: (9) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 22.078824ms) -Jul 29 16:32:38.399: INFO: (9) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 24.103639ms) -Jul 29 16:32:38.400: INFO: (9) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 24.757922ms) -Jul 29 16:32:38.400: INFO: (9) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 25.092239ms) -Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 25.527895ms) -Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 25.63094ms) -Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 27.208797ms) -Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 25.93489ms) -Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 26.072914ms) -Jul 29 16:32:38.416: INFO: (10) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 13.798904ms) -Jul 29 16:32:38.416: INFO: (10) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 14.002618ms) -Jul 29 16:32:38.421: INFO: (10) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 19.92713ms) -Jul 29 16:32:38.423: INFO: (10) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 20.429733ms) -Jul 29 16:32:38.424: INFO: (10) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 21.45366ms) -Jul 29 16:32:38.426: INFO: (10) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 23.921033ms) -Jul 29 16:32:38.426: INFO: (10) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 22.542962ms) -Jul 29 16:32:38.426: INFO: (10) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 24.656978ms) -Jul 29 16:32:38.427: INFO: (10) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 24.37089ms) -Jul 29 16:32:38.427: INFO: (10) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 24.071904ms) -Jul 29 16:32:38.427: INFO: (10) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 24.688448ms) -Jul 29 16:32:38.427: INFO: (10) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 25.079589ms) -Jul 29 16:32:38.429: INFO: (10) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 26.706183ms) -Jul 29 16:32:38.438: INFO: (11) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 7.717702ms) -Jul 29 16:32:38.442: INFO: (11) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 12.094704ms) -Jul 29 16:32:38.443: INFO: (11) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 13.482004ms) -Jul 29 16:32:38.445: INFO: (11) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 14.761708ms) -Jul 29 16:32:38.445: INFO: (11) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 14.914377ms) -Jul 29 16:32:38.446: INFO: (11) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 15.665083ms) -Jul 29 16:32:38.446: INFO: (11) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 20.047575ms) -Jul 29 16:32:38.450: INFO: (11) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 18.695696ms) -Jul 29 16:32:38.451: INFO: (11) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 19.979593ms) -Jul 29 16:32:38.453: INFO: (11) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 22.195757ms) -Jul 29 16:32:38.454: INFO: (11) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 23.335932ms) -Jul 29 16:32:38.454: INFO: (11) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 24.637117ms) -Jul 29 16:32:38.455: INFO: (11) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 23.533031ms) -Jul 29 16:32:38.456: INFO: (11) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 24.895801ms) -Jul 29 16:32:38.456: INFO: (11) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 24.61144ms) -Jul 29 16:32:38.473: INFO: (12) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 17.150625ms) -Jul 29 16:32:38.474: INFO: (12) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 18.181479ms) -Jul 29 16:32:38.475: INFO: (12) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 18.41795ms) -Jul 29 16:32:38.475: INFO: (12) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 18.612306ms) -Jul 29 16:32:38.475: INFO: (12) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 18.703594ms) -Jul 29 16:32:38.476: INFO: (12) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 18.622348ms) -Jul 29 16:32:38.476: INFO: (12) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 18.756314ms) -Jul 29 16:32:38.476: INFO: (12) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 18.86006ms) -Jul 29 16:32:38.477: INFO: (12) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 21.075159ms) -Jul 29 16:32:38.478: INFO: (12) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 20.574993ms) -Jul 29 16:32:38.478: INFO: (12) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 21.429286ms) -Jul 29 16:32:38.478: INFO: (12) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 21.578751ms) -Jul 29 16:32:38.478: INFO: (12) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 21.083842ms) -Jul 29 16:32:38.480: INFO: (12) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 23.01444ms) -Jul 29 16:32:38.479: INFO: (12) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 22.900947ms) -Jul 29 16:32:38.496: INFO: (13) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 16.062291ms) -Jul 29 16:32:38.496: INFO: (13) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 15.964718ms) -Jul 29 16:32:38.497: INFO: (13) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 17.231809ms) -Jul 29 16:32:38.498: INFO: (13) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 17.05065ms) -Jul 29 16:32:38.498: INFO: (13) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 17.387522ms) -Jul 29 16:32:38.504: INFO: (13) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 23.484413ms) -Jul 29 16:32:38.504: INFO: (13) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 23.272594ms) -Jul 29 16:32:38.504: INFO: (13) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 23.649997ms) -Jul 29 16:32:38.504: INFO: (13) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 23.401318ms) -Jul 29 16:32:38.505: INFO: (13) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 24.601755ms) -Jul 29 16:32:38.505: INFO: (13) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test<... (200; 29.498103ms) -Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 37.166995ms) -Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 36.859122ms) -Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 36.280022ms) -Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 36.738322ms) -Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 36.329904ms) -Jul 29 16:32:38.555: INFO: (14) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 36.76148ms) -Jul 29 16:32:38.555: INFO: (14) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 37.368383ms) -Jul 29 16:32:38.556: INFO: (14) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 47.412394ms) -Jul 29 16:32:38.613: INFO: (15) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 46.963681ms) -Jul 29 16:32:38.613: INFO: (15) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 47.037565ms) -Jul 29 16:32:38.613: INFO: (15) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 47.675067ms) -Jul 29 16:32:38.613: INFO: (15) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test<... (200; 52.336041ms) -Jul 29 16:32:38.619: INFO: (15) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 52.881551ms) -Jul 29 16:32:38.619: INFO: (15) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 53.128024ms) -Jul 29 16:32:38.620: INFO: (15) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 53.322345ms) -Jul 29 16:32:38.620: INFO: (15) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 53.543872ms) -Jul 29 16:32:38.620: INFO: (15) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 53.756934ms) -Jul 29 16:32:38.627: INFO: (15) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 60.858675ms) -Jul 29 16:32:38.640: INFO: (16) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 13.085609ms) -Jul 29 16:32:38.641: INFO: (16) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 14.446247ms) -Jul 29 16:32:38.643: INFO: (16) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 15.464608ms) -Jul 29 16:32:38.651: INFO: (16) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 23.124268ms) -Jul 29 16:32:38.651: INFO: (16) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 23.376991ms) -Jul 29 16:32:38.651: INFO: (16) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 23.768777ms) -Jul 29 16:32:38.651: INFO: (16) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 23.753462ms) -Jul 29 16:32:38.658: INFO: (16) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 30.712034ms) -Jul 29 16:32:38.659: INFO: (16) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 31.224825ms) -Jul 29 16:32:38.660: INFO: (16) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 24.231184ms) -Jul 29 16:32:38.690: INFO: (17) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 24.113942ms) -Jul 29 16:32:38.690: INFO: (17) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 24.956544ms) -Jul 29 16:32:38.690: INFO: (17) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 24.899526ms) -Jul 29 16:32:38.690: INFO: (17) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 24.701748ms) -Jul 29 16:32:38.691: INFO: (17) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 25.098712ms) -Jul 29 16:32:38.691: INFO: (17) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 25.225886ms) -Jul 29 16:32:38.692: INFO: (17) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 26.485045ms) -Jul 29 16:32:38.693: INFO: (17) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 27.120545ms) -Jul 29 16:32:38.695: INFO: (17) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 28.676816ms) -Jul 29 16:32:38.695: INFO: (17) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 29.508012ms) -Jul 29 16:32:38.696: INFO: (17) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 30.55996ms) -Jul 29 16:32:38.698: INFO: (17) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 32.56387ms) -Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 17.766121ms) -Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 18.087937ms) -Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 17.780651ms) -Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 18.335909ms) -Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 19.039791ms) -Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 19.205185ms) -Jul 29 16:32:38.718: INFO: (18) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 19.372371ms) -Jul 29 16:32:38.720: INFO: (18) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 22.000191ms) -Jul 29 16:32:38.721: INFO: (18) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 22.503433ms) -Jul 29 16:32:38.723: INFO: (18) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 24.920834ms) -Jul 29 16:32:38.724: INFO: (18) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 26.036028ms) -Jul 29 16:32:38.724: INFO: (18) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 25.727231ms) -Jul 29 16:32:38.730: INFO: (18) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 31.339893ms) -Jul 29 16:32:38.730: INFO: (18) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 31.479603ms) -Jul 29 16:32:38.731: INFO: (18) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 33.055804ms) -Jul 29 16:32:38.732: INFO: (18) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 27.061275ms) -Jul 29 16:32:38.760: INFO: (19) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 27.485195ms) -Jul 29 16:32:38.762: INFO: (19) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 31.712476ms) -Jul 29 16:32:38.764: INFO: (19) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 31.068495ms) -Jul 29 16:32:38.766: INFO: (19) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 33.451585ms) -Jul 29 16:32:38.767: INFO: (19) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 33.971991ms) -Jul 29 16:32:38.768: INFO: (19) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 35.196311ms) -Jul 29 16:32:38.772: INFO: (19) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 39.33797ms) -Jul 29 16:32:38.772: INFO: (19) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 39.675245ms) -Jul 29 16:32:38.773: INFO: (19) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 39.640187ms) -Jul 29 16:32:38.773: INFO: (19) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 40.132488ms) -Jul 29 16:32:38.773: INFO: (19) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 40.139114ms) -Jul 29 16:32:38.774: INFO: (19) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 41.731195ms) -Jul 29 16:32:38.774: INFO: (19) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 41.249556ms) -STEP: deleting ReplicationController proxy-service-68xg5 in namespace proxy-7213, will wait for the garbage collector to delete the pods 07/29/23 16:32:38.775 -Jul 29 16:32:38.860: INFO: Deleting ReplicationController proxy-service-68xg5 took: 27.072243ms -Jul 29 16:32:38.961: INFO: Terminating ReplicationController proxy-service-68xg5 pods took: 100.740883ms -[AfterEach] version v1 
+[BeforeEach] PriorityClass endpoints
+ test/e2e/scheduling/preemption.go:771
+[It] verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]
+ test/e2e/scheduling/preemption.go:814
+Aug 24 12:41:19.641: INFO: PriorityClass.scheduling.k8s.io "p1" is invalid: value: Forbidden: may not be changed in an update.
+Aug 24 12:41:19.650: INFO: PriorityClass.scheduling.k8s.io "p2" is invalid: value: Forbidden: may not be changed in an update.
+[AfterEach] PriorityClass endpoints
test/e2e/framework/node/init/init.go:32
-Jul 29 16:32:41.063: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] version v1
+Aug 24 12:41:19.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[AfterEach] PriorityClass endpoints
+ test/e2e/scheduling/preemption.go:787
+[AfterEach] [sig-scheduling] SchedulerPreemption [Serial]
+ test/e2e/framework/node/init/init.go:32
+Aug 24 12:41:19.726: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[AfterEach] [sig-scheduling] SchedulerPreemption [Serial]
+ test/e2e/scheduling/preemption.go:84
+[DeferCleanup (Each)] PriorityClass endpoints
test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] version v1
+[DeferCleanup (Each)] PriorityClass endpoints
dump namespaces | framework.go:196
-[DeferCleanup (Each)] version v1
+[DeferCleanup (Each)] PriorityClass endpoints
+ tear down framework | framework.go:193
+STEP: Destroying namespace "sched-preemption-path-1328" for this suite. 08/24/23 12:41:19.821
+[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial]
+ test/e2e/framework/metrics/init/init.go:33
+[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial]
+ dump namespaces | framework.go:196
+[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial]
tear down framework | framework.go:193
-STEP: Destroying namespace "proxy-7213" for this suite. 07/29/23 16:32:41.072
+STEP: Destroying namespace "sched-preemption-6369" for this suite. 08/24/23 12:41:19.835
------------------------------
-• [SLOW TEST] [5.250 seconds]
-[sig-network] Proxy
-test/e2e/network/common/framework.go:23
- version v1
- test/e2e/network/proxy.go:74
- should proxy through a service and a pod [Conformance]
- test/e2e/network/proxy.go:101
+• [SLOW TEST] [60.478 seconds]
+[sig-scheduling] SchedulerPreemption [Serial]
+test/e2e/scheduling/framework.go:40
+ PriorityClass endpoints
+ test/e2e/scheduling/preemption.go:764
+ verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]
+ test/e2e/scheduling/preemption.go:814
Begin Captured GinkgoWriter Output >>
- [BeforeEach] version v1
+ [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial]
set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:32:35.832
- Jul 29 16:32:35.832: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename proxy 07/29/23 16:32:35.834
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:35.916
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:35.923
- [BeforeEach] version v1
+ STEP: Creating a kubernetes client 08/24/23 12:40:19.37
+ Aug 24 12:40:19.370: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename sched-preemption 08/24/23 12:40:19.376
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:40:19.426
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:40:19.431
+ [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial]
test/e2e/framework/metrics/init/init.go:31
- [It] should proxy through a service and a pod [Conformance]
- test/e2e/network/proxy.go:101
- STEP: starting an echo server on multiple ports 07/29/23 16:32:35.954
- STEP: creating replication controller proxy-service-68xg5 in namespace proxy-7213 07/29/23 16:32:35.954
- I0729 16:32:35.977151 13 runners.go:193] Created replication controller with name: proxy-service-68xg5, namespace: proxy-7213, replica count: 1
- I0729 16:32:37.029235 13 runners.go:193] proxy-service-68xg5 Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
- I0729 16:32:38.030373 13 runners.go:193] proxy-service-68xg5 Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
- Jul 29 16:32:38.039: INFO: setup took 2.110063719s, starting test cases
- STEP: running 16 cases, 20 attempts per case, 320 total attempts 07/29/23 16:32:38.039
- Jul 29 16:32:38.061: INFO: (0) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 19.996151ms)
- Jul 29 16:32:38.067: INFO: (0) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 25.983211ms)
- Jul 29 16:32:38.068: INFO: (0) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 28.318311ms)
- Jul 29 16:32:38.074: INFO: (0) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ...
(200; 33.675477ms) - Jul 29 16:32:38.074: INFO: (0) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 31.708547ms) - Jul 29 16:32:38.075: INFO: (0) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 32.271334ms) - Jul 29 16:32:38.076: INFO: (0) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 34.33376ms) - Jul 29 16:32:38.077: INFO: (0) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 34.629799ms) - Jul 29 16:32:38.077: INFO: (0) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 36.300477ms) - Jul 29 16:32:38.077: INFO: (0) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 35.536541ms) - Jul 29 16:32:38.083: INFO: (0) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 42.336354ms) - Jul 29 16:32:38.084: INFO: (0) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 40.343248ms) - Jul 29 16:32:38.084: INFO: (0) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 43.839986ms) - Jul 29 16:32:38.084: INFO: (0) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 42.578167ms) - Jul 29 16:32:38.085: INFO: (0) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 41.940973ms) - Jul 29 16:32:38.086: INFO: (0) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 20.210314ms) - Jul 29 16:32:38.108: INFO: (1) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 20.618224ms) - Jul 29 16:32:38.108: INFO: (1) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 19.888169ms) - Jul 29 16:32:38.110: INFO: (1) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 22.146402ms) - Jul 29 16:32:38.110: INFO: (1) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 23.420182ms) - Jul 29 16:32:38.112: INFO: (1) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 25.056807ms) - Jul 29 16:32:38.114: INFO: (1) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 27.632571ms) - Jul 29 16:32:38.118: INFO: (1) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 30.777474ms) - Jul 29 16:32:38.118: INFO: (1) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 30.325743ms) - Jul 29 16:32:38.119: INFO: (1) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 29.761579ms) - Jul 29 16:32:38.119: INFO: (1) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 18.305499ms) - Jul 29 16:32:38.143: INFO: (2) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 19.146402ms) - Jul 29 16:32:38.148: INFO: (2) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 23.227503ms) - Jul 29 16:32:38.149: INFO: (2) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 23.218783ms) - Jul 29 16:32:38.149: INFO: (2) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 23.275229ms) - Jul 29 16:32:38.152: INFO: (2) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test<... (200; 28.123768ms) - Jul 29 16:32:38.153: INFO: (2) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 27.301827ms) - Jul 29 16:32:38.153: INFO: (2) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 28.324763ms) - Jul 29 16:32:38.153: INFO: (2) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 29.436572ms) - Jul 29 16:32:38.158: INFO: (2) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 32.498911ms) - Jul 29 16:32:38.158: INFO: (2) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 32.75343ms) - Jul 29 16:32:38.166: INFO: (3) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 7.370553ms) - Jul 29 16:32:38.180: INFO: (3) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 21.079944ms) - Jul 29 16:32:38.181: INFO: (3) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 20.151911ms) - Jul 29 16:32:38.180: INFO: (3) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 22.0371ms) - Jul 29 16:32:38.181: INFO: (3) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 20.665373ms) - Jul 29 16:32:38.183: INFO: (3) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 23.598603ms) - Jul 29 16:32:38.185: INFO: (3) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 22.840135ms) - Jul 29 16:32:38.185: INFO: (3) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 25.65389ms) - Jul 29 16:32:38.186: INFO: (3) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 28.151786ms) - Jul 29 16:32:38.187: INFO: (3) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 26.190363ms) - Jul 29 16:32:38.189: INFO: (3) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 26.687297ms) - Jul 29 16:32:38.189: INFO: (3) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 25.950447ms) - Jul 29 16:32:38.189: INFO: (3) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 29.658069ms) - Jul 29 16:32:38.190: INFO: (3) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 29.382829ms) - Jul 29 16:32:38.195: INFO: (3) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 34.54882ms) - Jul 29 16:32:38.211: INFO: (4) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 15.300727ms) - Jul 29 16:32:38.214: INFO: (4) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 18.397157ms) - Jul 29 16:32:38.214: INFO: (4) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 17.831575ms) - Jul 29 16:32:38.214: INFO: (4) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 18.792654ms) - Jul 29 16:32:38.215: INFO: (4) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 18.806711ms) - Jul 29 16:32:38.220: INFO: (4) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 23.892525ms) - Jul 29 16:32:38.220: INFO: (4) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 24.416276ms) - Jul 29 16:32:38.221: INFO: (4) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 24.604502ms) - Jul 29 16:32:38.221: INFO: (4) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 24.323787ms) - Jul 29 16:32:38.221: INFO: (4) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 24.982048ms) - Jul 29 16:32:38.221: INFO: (4) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 25.079856ms) - Jul 29 16:32:38.223: INFO: (4) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 26.50332ms) - Jul 29 16:32:38.223: INFO: (4) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 26.702388ms) - Jul 29 16:32:38.223: INFO: (4) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 27.232555ms) - Jul 29 16:32:38.235: INFO: (5) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 11.070774ms) - Jul 29 16:32:38.235: INFO: (5) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 11.47216ms) - Jul 29 16:32:38.236: INFO: (5) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 11.847249ms) - Jul 29 16:32:38.238: INFO: (5) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 13.273147ms) - Jul 29 16:32:38.238: INFO: (5) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 14.638505ms) - Jul 29 16:32:38.243: INFO: (5) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 18.694251ms) - Jul 29 16:32:38.244: INFO: (5) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 19.661021ms) - Jul 29 16:32:38.247: INFO: (5) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 22.351168ms) - Jul 29 16:32:38.247: INFO: (5) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 22.522375ms) - Jul 29 16:32:38.251: INFO: (5) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 27.379255ms) - Jul 29 16:32:38.252: INFO: (5) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 26.90698ms) - Jul 29 16:32:38.252: INFO: (5) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 28.173772ms) - Jul 29 16:32:38.252: INFO: (5) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 27.513213ms) - Jul 29 16:32:38.253: INFO: (5) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test<... (200; 18.641101ms) - Jul 29 16:32:38.275: INFO: (6) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 18.371055ms) - Jul 29 16:32:38.276: INFO: (6) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 18.679446ms) - Jul 29 16:32:38.276: INFO: (6) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 19.860368ms) - Jul 29 16:32:38.276: INFO: (6) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 20.067863ms) - Jul 29 16:32:38.277: INFO: (6) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 20.524039ms) - Jul 29 16:32:38.279: INFO: (6) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 45.705664ms) - Jul 29 16:32:38.332: INFO: (7) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 47.116552ms) - Jul 29 16:32:38.338: INFO: (7) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 51.894699ms) - Jul 29 16:32:38.338: INFO: (7) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 53.242561ms) - Jul 29 16:32:38.338: INFO: (7) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 53.165928ms) - Jul 29 16:32:38.338: INFO: (7) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 51.186611ms) - Jul 29 16:32:38.339: INFO: (7) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 52.103114ms) - Jul 29 16:32:38.341: INFO: (7) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 54.061722ms) - Jul 29 16:32:38.342: INFO: (7) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 55.561833ms) - Jul 29 16:32:38.342: INFO: (7) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 56.170381ms) - Jul 29 16:32:38.342: INFO: (7) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 56.904539ms) - Jul 29 16:32:38.343: INFO: (7) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 57.357636ms) - Jul 29 16:32:38.343: INFO: (7) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 57.706114ms) - Jul 29 16:32:38.343: INFO: (7) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 57.345871ms) - Jul 29 16:32:38.344: INFO: (7) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 56.789962ms) - Jul 29 16:32:38.357: INFO: (8) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 11.652892ms) - Jul 29 16:32:38.358: INFO: (8) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 20.21156ms) - Jul 29 16:32:38.365: INFO: (8) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 19.48274ms) - Jul 29 16:32:38.365: INFO: (8) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 21.562637ms) - Jul 29 16:32:38.366: INFO: (8) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 21.385685ms) - Jul 29 16:32:38.366: INFO: (8) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 20.934887ms) - Jul 29 16:32:38.366: INFO: (8) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 22.327894ms) - Jul 29 16:32:38.366: INFO: (8) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 21.261937ms) - Jul 29 16:32:38.367: INFO: (8) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 21.112635ms) - Jul 29 16:32:38.371: INFO: (8) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 25.30134ms) - Jul 29 16:32:38.371: INFO: (8) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 25.289483ms) - Jul 29 16:32:38.371: INFO: (8) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 27.362474ms) - Jul 29 16:32:38.372: INFO: (8) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 27.169282ms) - Jul 29 16:32:38.373: INFO: (8) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 27.289121ms) - Jul 29 16:32:38.373: INFO: (8) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 27.466856ms) - Jul 29 16:32:38.388: INFO: (9) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 13.070894ms) - Jul 29 16:32:38.391: INFO: (9) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 15.958927ms) - Jul 29 16:32:38.392: INFO: (9) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 18.149933ms) - Jul 29 16:32:38.392: INFO: (9) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 18.518866ms) - Jul 29 16:32:38.392: INFO: (9) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 18.221312ms) - Jul 29 16:32:38.393: INFO: (9) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 18.544974ms) - Jul 29 16:32:38.397: INFO: (9) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 22.078824ms) - Jul 29 16:32:38.399: INFO: (9) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 24.103639ms) - Jul 29 16:32:38.400: INFO: (9) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 24.757922ms) - Jul 29 16:32:38.400: INFO: (9) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 25.092239ms) - Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 25.527895ms) - Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 25.63094ms) - Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 27.208797ms) - Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 25.93489ms) - Jul 29 16:32:38.401: INFO: (9) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 26.072914ms) - Jul 29 16:32:38.416: INFO: (10) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 13.798904ms) - Jul 29 16:32:38.416: INFO: (10) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 14.002618ms) - Jul 29 16:32:38.421: INFO: (10) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 19.92713ms) - Jul 29 16:32:38.423: INFO: (10) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 20.429733ms) - Jul 29 16:32:38.424: INFO: (10) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 21.45366ms) - Jul 29 16:32:38.426: INFO: (10) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 23.921033ms) - Jul 29 16:32:38.426: INFO: (10) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 22.542962ms) - Jul 29 16:32:38.426: INFO: (10) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 24.656978ms) - Jul 29 16:32:38.427: INFO: (10) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 24.37089ms) - Jul 29 16:32:38.427: INFO: (10) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 24.071904ms) - Jul 29 16:32:38.427: INFO: (10) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 24.688448ms) - Jul 29 16:32:38.427: INFO: (10) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 25.079589ms) - Jul 29 16:32:38.429: INFO: (10) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 26.706183ms) - Jul 29 16:32:38.438: INFO: (11) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 7.717702ms) - Jul 29 16:32:38.442: INFO: (11) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 12.094704ms) - Jul 29 16:32:38.443: INFO: (11) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 13.482004ms) - Jul 29 16:32:38.445: INFO: (11) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 14.761708ms) - Jul 29 16:32:38.445: INFO: (11) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 14.914377ms) - Jul 29 16:32:38.446: INFO: (11) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 15.665083ms) - Jul 29 16:32:38.446: INFO: (11) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 20.047575ms) - Jul 29 16:32:38.450: INFO: (11) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 18.695696ms) - Jul 29 16:32:38.451: INFO: (11) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 19.979593ms) - Jul 29 16:32:38.453: INFO: (11) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 22.195757ms) - Jul 29 16:32:38.454: INFO: (11) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 23.335932ms) - Jul 29 16:32:38.454: INFO: (11) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 24.637117ms) - Jul 29 16:32:38.455: INFO: (11) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 23.533031ms) - Jul 29 16:32:38.456: INFO: (11) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 24.895801ms) - Jul 29 16:32:38.456: INFO: (11) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 24.61144ms) - Jul 29 16:32:38.473: INFO: (12) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 17.150625ms) - Jul 29 16:32:38.474: INFO: (12) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 18.181479ms) - Jul 29 16:32:38.475: INFO: (12) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 18.41795ms) - Jul 29 16:32:38.475: INFO: (12) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 18.612306ms) - Jul 29 16:32:38.475: INFO: (12) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 18.703594ms) - Jul 29 16:32:38.476: INFO: (12) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 18.622348ms) - Jul 29 16:32:38.476: INFO: (12) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 18.756314ms) - Jul 29 16:32:38.476: INFO: (12) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 18.86006ms) - Jul 29 16:32:38.477: INFO: (12) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 21.075159ms) - Jul 29 16:32:38.478: INFO: (12) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 20.574993ms) - Jul 29 16:32:38.478: INFO: (12) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 21.429286ms) - Jul 29 16:32:38.478: INFO: (12) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 21.578751ms) - Jul 29 16:32:38.478: INFO: (12) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 21.083842ms) - Jul 29 16:32:38.480: INFO: (12) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 23.01444ms) - Jul 29 16:32:38.479: INFO: (12) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 22.900947ms) - Jul 29 16:32:38.496: INFO: (13) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 16.062291ms) - Jul 29 16:32:38.496: INFO: (13) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 15.964718ms) - Jul 29 16:32:38.497: INFO: (13) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 17.231809ms) - Jul 29 16:32:38.498: INFO: (13) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 17.05065ms) - Jul 29 16:32:38.498: INFO: (13) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 17.387522ms) - Jul 29 16:32:38.504: INFO: (13) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 23.484413ms) - Jul 29 16:32:38.504: INFO: (13) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 23.272594ms) - Jul 29 16:32:38.504: INFO: (13) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 23.649997ms) - Jul 29 16:32:38.504: INFO: (13) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 23.401318ms) - Jul 29 16:32:38.505: INFO: (13) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 24.601755ms) - Jul 29 16:32:38.505: INFO: (13) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test<... (200; 29.498103ms) - Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 37.166995ms) - Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 36.859122ms) - Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 36.280022ms) - Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 36.738322ms) - Jul 29 16:32:38.554: INFO: (14) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 36.329904ms) - Jul 29 16:32:38.555: INFO: (14) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 36.76148ms) - Jul 29 16:32:38.555: INFO: (14) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 37.368383ms) - Jul 29 16:32:38.556: INFO: (14) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 47.412394ms) - Jul 29 16:32:38.613: INFO: (15) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 46.963681ms) - Jul 29 16:32:38.613: INFO: (15) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 47.037565ms) - Jul 29 16:32:38.613: INFO: (15) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 47.675067ms) - Jul 29 16:32:38.613: INFO: (15) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test<... (200; 52.336041ms) - Jul 29 16:32:38.619: INFO: (15) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 52.881551ms) - Jul 29 16:32:38.619: INFO: (15) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 53.128024ms) - Jul 29 16:32:38.620: INFO: (15) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 53.322345ms) - Jul 29 16:32:38.620: INFO: (15) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 53.543872ms) - Jul 29 16:32:38.620: INFO: (15) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 53.756934ms) - Jul 29 16:32:38.627: INFO: (15) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 60.858675ms) - Jul 29 16:32:38.640: INFO: (16) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 13.085609ms) - Jul 29 16:32:38.641: INFO: (16) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 14.446247ms) - Jul 29 16:32:38.643: INFO: (16) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... 
(200; 15.464608ms) - Jul 29 16:32:38.651: INFO: (16) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 23.124268ms) - Jul 29 16:32:38.651: INFO: (16) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 23.376991ms) - Jul 29 16:32:38.651: INFO: (16) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 23.768777ms) - Jul 29 16:32:38.651: INFO: (16) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 23.753462ms) - Jul 29 16:32:38.658: INFO: (16) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 30.712034ms) - Jul 29 16:32:38.659: INFO: (16) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 31.224825ms) - Jul 29 16:32:38.660: INFO: (16) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 24.231184ms) - Jul 29 16:32:38.690: INFO: (17) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... (200; 24.113942ms) - Jul 29 16:32:38.690: INFO: (17) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 24.956544ms) - Jul 29 16:32:38.690: INFO: (17) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 24.899526ms) - Jul 29 16:32:38.690: INFO: (17) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 24.701748ms) - Jul 29 16:32:38.691: INFO: (17) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 25.098712ms) - Jul 29 16:32:38.691: INFO: (17) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 25.225886ms) - Jul 29 16:32:38.692: INFO: (17) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 26.485045ms) - Jul 29 16:32:38.693: INFO: (17) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 27.120545ms) - Jul 29 16:32:38.695: INFO: (17) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 28.676816ms) - Jul 29 16:32:38.695: INFO: (17) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 29.508012ms) - Jul 29 16:32:38.696: INFO: (17) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 30.55996ms) - Jul 29 16:32:38.698: INFO: (17) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 32.56387ms) - Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 17.766121ms) - Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 18.087937ms) - Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 17.780651ms) - Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:1080/proxy/: ... (200; 18.335909ms) - Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 19.039791ms) - Jul 29 16:32:38.717: INFO: (18) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 19.205185ms) - Jul 29 16:32:38.718: INFO: (18) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 19.372371ms) - Jul 29 16:32:38.720: INFO: (18) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2/proxy/: test (200; 22.000191ms) - Jul 29 16:32:38.721: INFO: (18) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 22.503433ms) - Jul 29 16:32:38.723: INFO: (18) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 24.920834ms) - Jul 29 16:32:38.724: INFO: (18) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 26.036028ms) - Jul 29 16:32:38.724: INFO: (18) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 25.727231ms) - Jul 29 16:32:38.730: INFO: (18) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 31.339893ms) - Jul 29 16:32:38.730: INFO: (18) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 31.479603ms) - Jul 29 16:32:38.731: INFO: (18) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 33.055804ms) - Jul 29 16:32:38.732: INFO: (18) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: ... (200; 27.061275ms) - Jul 29 16:32:38.760: INFO: (19) /api/v1/namespaces/proxy-7213/pods/http:proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 27.485195ms) - Jul 29 16:32:38.762: INFO: (19) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:443/proxy/: test (200; 31.712476ms) - Jul 29 16:32:38.764: INFO: (19) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:1080/proxy/: test<... 
(200; 31.068495ms) - Jul 29 16:32:38.766: INFO: (19) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:462/proxy/: tls qux (200; 33.451585ms) - Jul 29 16:32:38.767: INFO: (19) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname1/proxy/: foo (200; 33.971991ms) - Jul 29 16:32:38.768: INFO: (19) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname1/proxy/: tls baz (200; 35.196311ms) - Jul 29 16:32:38.772: INFO: (19) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:162/proxy/: bar (200; 39.33797ms) - Jul 29 16:32:38.772: INFO: (19) /api/v1/namespaces/proxy-7213/pods/https:proxy-service-68xg5-6xpl2:460/proxy/: tls baz (200; 39.675245ms) - Jul 29 16:32:38.773: INFO: (19) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname2/proxy/: bar (200; 39.640187ms) - Jul 29 16:32:38.773: INFO: (19) /api/v1/namespaces/proxy-7213/pods/proxy-service-68xg5-6xpl2:160/proxy/: foo (200; 40.132488ms) - Jul 29 16:32:38.773: INFO: (19) /api/v1/namespaces/proxy-7213/services/proxy-service-68xg5:portname2/proxy/: bar (200; 40.139114ms) - Jul 29 16:32:38.774: INFO: (19) /api/v1/namespaces/proxy-7213/services/https:proxy-service-68xg5:tlsportname2/proxy/: tls qux (200; 41.731195ms) - Jul 29 16:32:38.774: INFO: (19) /api/v1/namespaces/proxy-7213/services/http:proxy-service-68xg5:portname1/proxy/: foo (200; 41.249556ms) - STEP: deleting ReplicationController proxy-service-68xg5 in namespace proxy-7213, will wait for the garbage collector to delete the pods 07/29/23 16:32:38.775 - Jul 29 16:32:38.860: INFO: Deleting ReplicationController proxy-service-68xg5 took: 27.072243ms - Jul 29 16:32:38.961: INFO: Terminating ReplicationController proxy-service-68xg5 pods took: 100.740883ms - [AfterEach] version v1 + [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:97 + Aug 24 12:40:19.469: INFO: Waiting up to 1m0s for all nodes to be ready + Aug 24 12:41:19.529: INFO: Waiting for terminating namespaces to be deleted... + [BeforeEach] PriorityClass endpoints + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:41:19.536 + Aug 24 12:41:19.536: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename sched-preemption-path 08/24/23 12:41:19.538 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:19.598 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:19.604 + [BeforeEach] PriorityClass endpoints + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] PriorityClass endpoints + test/e2e/scheduling/preemption.go:771 + [It] verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] + test/e2e/scheduling/preemption.go:814 + Aug 24 12:41:19.641: INFO: PriorityClass.scheduling.k8s.io "p1" is invalid: value: Forbidden: may not be changed in an update. + Aug 24 12:41:19.650: INFO: PriorityClass.scheduling.k8s.io "p2" is invalid: value: Forbidden: may not be changed in an update. 
+ [AfterEach] PriorityClass endpoints
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:32:41.063: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] version v1
+ Aug 24 12:41:19.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [AfterEach] PriorityClass endpoints
+ test/e2e/scheduling/preemption.go:787
+ [AfterEach] [sig-scheduling] SchedulerPreemption [Serial]
+ test/e2e/framework/node/init/init.go:32
+ Aug 24 12:41:19.726: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [AfterEach] [sig-scheduling] SchedulerPreemption [Serial]
+ test/e2e/scheduling/preemption.go:84
+ [DeferCleanup (Each)] PriorityClass endpoints
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] version v1
+ [DeferCleanup (Each)] PriorityClass endpoints
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] version v1
+ [DeferCleanup (Each)] PriorityClass endpoints
+ tear down framework | framework.go:193
+ STEP: Destroying namespace "sched-preemption-path-1328" for this suite. 08/24/23 12:41:19.821
+ [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial]
+ test/e2e/framework/metrics/init/init.go:33
+ [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial]
+ dump namespaces | framework.go:196
+ [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial]
 tear down framework | framework.go:193
- STEP: Destroying namespace "proxy-7213" for this suite. 07/29/23 16:32:41.072
+ STEP: Destroying namespace "sched-preemption-6369" for this suite. 08/24/23 12:41:19.835
 << End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-storage] Secrets
- should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:99
-[BeforeEach] [sig-storage] Secrets
+[sig-storage] ConfigMap
+ updates should be reflected in volume [NodeConformance] [Conformance]
+ test/e2e/common/storage/configmap_volume.go:124
+[BeforeEach] [sig-storage] ConfigMap
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:32:41.095
-Jul 29 16:32:41.095: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename secrets 07/29/23 16:32:41.097
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:41.133
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:41.144
-[BeforeEach] [sig-storage] Secrets
+STEP: Creating a kubernetes client 08/24/23 12:41:19.856
+Aug 24 12:41:19.856: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename configmap 08/24/23 12:41:19.859
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:19.892
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:19.897
+[BeforeEach] [sig-storage] ConfigMap
 test/e2e/framework/metrics/init/init.go:31
-[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:99
-STEP: Creating secret with name secret-test-41821c60-92a8-45a6-8096-3f2075b32f01 07/29/23 16:32:41.211
-STEP: Creating a pod to test consume secrets 07/29/23 16:32:41.227
-Jul 29 16:32:41.275: INFO: Waiting up to 5m0s for pod 
"pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a" in namespace "secrets-8655" to be "Succeeded or Failed" -Jul 29 16:32:41.285: INFO: Pod "pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a": Phase="Pending", Reason="", readiness=false. Elapsed: 9.922738ms -Jul 29 16:32:43.295: INFO: Pod "pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020205968s -Jul 29 16:32:45.314: INFO: Pod "pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.039536526s -STEP: Saw pod success 07/29/23 16:32:45.315 -Jul 29 16:32:45.315: INFO: Pod "pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a" satisfied condition "Succeeded or Failed" -Jul 29 16:32:45.321: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a container secret-volume-test: -STEP: delete the pod 07/29/23 16:32:45.37 -Jul 29 16:32:45.396: INFO: Waiting for pod pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a to disappear -Jul 29 16:32:45.402: INFO: Pod pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a no longer exists -[AfterEach] [sig-storage] Secrets +[It] updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:124 +STEP: Creating configMap with name configmap-test-upd-80b8c09a-cf0f-4301-8841-31061bb5b896 08/24/23 12:41:19.913 +STEP: Creating the pod 08/24/23 12:41:19.922 +Aug 24 12:41:19.942: INFO: Waiting up to 5m0s for pod "pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d" in namespace "configmap-5990" to be "running and ready" +Aug 24 12:41:19.950: INFO: Pod "pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d": Phase="Pending", Reason="", readiness=false. Elapsed: 7.778726ms +Aug 24 12:41:19.950: INFO: The phase of Pod pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:41:21.958: INFO: Pod "pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d": Phase="Running", Reason="", readiness=true. Elapsed: 2.015880557s +Aug 24 12:41:21.958: INFO: The phase of Pod pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d is Running (Ready = true) +Aug 24 12:41:21.958: INFO: Pod "pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d" satisfied condition "running and ready" +STEP: Updating configmap configmap-test-upd-80b8c09a-cf0f-4301-8841-31061bb5b896 08/24/23 12:41:21.977 +STEP: waiting to observe update in volume 08/24/23 12:41:21.987 +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:32:45.402: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Secrets +Aug 24 12:41:24.012: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-8655" for this suite. 07/29/23 16:32:45.413 -STEP: Destroying namespace "secret-namespace-630" for this suite. 07/29/23 16:32:45.423 +STEP: Destroying namespace "configmap-5990" for this suite. 
08/24/23 12:41:24.02
------------------------------
-• [4.339 seconds]
-[sig-storage] Secrets
+• [4.174 seconds]
+[sig-storage] ConfigMap
 test/e2e/common/storage/framework.go:23
- should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:99
+ updates should be reflected in volume [NodeConformance] [Conformance]
+ test/e2e/common/storage/configmap_volume.go:124
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-storage] Secrets
+ [BeforeEach] [sig-storage] ConfigMap
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:32:41.095
- Jul 29 16:32:41.095: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename secrets 07/29/23 16:32:41.097
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:41.133
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:41.144
- [BeforeEach] [sig-storage] Secrets
+ STEP: Creating a kubernetes client 08/24/23 12:41:19.856
+ Aug 24 12:41:19.856: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename configmap 08/24/23 12:41:19.859
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:19.892
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:19.897
+ [BeforeEach] [sig-storage] ConfigMap
 test/e2e/framework/metrics/init/init.go:31
- [It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
- test/e2e/common/storage/secrets_volume.go:99
- STEP: Creating secret with name secret-test-41821c60-92a8-45a6-8096-3f2075b32f01 07/29/23 16:32:41.211
- STEP: Creating a pod to test consume secrets 07/29/23 16:32:41.227
- Jul 29 16:32:41.275: INFO: Waiting up to 5m0s for pod "pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a" in namespace "secrets-8655" to be "Succeeded or Failed"
- Jul 29 16:32:41.285: INFO: Pod "pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a": Phase="Pending", Reason="", readiness=false. Elapsed: 9.922738ms
- Jul 29 16:32:43.295: INFO: Pod "pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020205968s
- Jul 29 16:32:45.314: INFO: Pod "pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.039536526s - STEP: Saw pod success 07/29/23 16:32:45.315 - Jul 29 16:32:45.315: INFO: Pod "pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a" satisfied condition "Succeeded or Failed" - Jul 29 16:32:45.321: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a container secret-volume-test: - STEP: delete the pod 07/29/23 16:32:45.37 - Jul 29 16:32:45.396: INFO: Waiting for pod pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a to disappear - Jul 29 16:32:45.402: INFO: Pod pod-secrets-72f560f1-28b3-43bd-9bf2-962c61ffde6a no longer exists - [AfterEach] [sig-storage] Secrets + [It] updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:124 + STEP: Creating configMap with name configmap-test-upd-80b8c09a-cf0f-4301-8841-31061bb5b896 08/24/23 12:41:19.913 + STEP: Creating the pod 08/24/23 12:41:19.922 + Aug 24 12:41:19.942: INFO: Waiting up to 5m0s for pod "pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d" in namespace "configmap-5990" to be "running and ready" + Aug 24 12:41:19.950: INFO: Pod "pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d": Phase="Pending", Reason="", readiness=false. Elapsed: 7.778726ms + Aug 24 12:41:19.950: INFO: The phase of Pod pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:41:21.958: INFO: Pod "pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d": Phase="Running", Reason="", readiness=true. Elapsed: 2.015880557s + Aug 24 12:41:21.958: INFO: The phase of Pod pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d is Running (Ready = true) + Aug 24 12:41:21.958: INFO: Pod "pod-configmaps-26e7a79b-d13b-4b20-bdf9-ca027a83977d" satisfied condition "running and ready" + STEP: Updating configmap configmap-test-upd-80b8c09a-cf0f-4301-8841-31061bb5b896 08/24/23 12:41:21.977 + STEP: waiting to observe update in volume 08/24/23 12:41:21.987 + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:32:45.402: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Secrets + Aug 24 12:41:24.012: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-8655" for this suite. 07/29/23 16:32:45.413 - STEP: Destroying namespace "secret-namespace-630" for this suite. 07/29/23 16:32:45.423 + STEP: Destroying namespace "configmap-5990" for this suite. 08/24/23 12:41:24.02 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should verify ResourceQuota with terminating scopes. 
[Conformance]
- test/e2e/apimachinery/resource_quota.go:690
-[BeforeEach] [sig-api-machinery] ResourceQuota
+[sig-node] Downward API
+ should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+ test/e2e/common/node/downwardapi.go:166
+[BeforeEach] [sig-node] Downward API
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:32:45.444
-Jul 29 16:32:45.445: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename resourcequota 07/29/23 16:32:45.448
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:45.476
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:45.481
-[BeforeEach] [sig-api-machinery] ResourceQuota
+STEP: Creating a kubernetes client 08/24/23 12:41:24.032
+Aug 24 12:41:24.032: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename downward-api 08/24/23 12:41:24.034
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:24.068
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:24.074
+[BeforeEach] [sig-node] Downward API
 test/e2e/framework/metrics/init/init.go:31
-[It] should verify ResourceQuota with terminating scopes. [Conformance]
- test/e2e/apimachinery/resource_quota.go:690
-STEP: Creating a ResourceQuota with terminating scope 07/29/23 16:32:45.487
-STEP: Ensuring ResourceQuota status is calculated 07/29/23 16:32:45.495
-STEP: Creating a ResourceQuota with not terminating scope 07/29/23 16:32:47.505
-STEP: Ensuring ResourceQuota status is calculated 07/29/23 16:32:47.514
-STEP: Creating a long running pod 07/29/23 16:32:49.522
-STEP: Ensuring resource quota with not terminating scope captures the pod usage 07/29/23 16:32:49.553
-STEP: Ensuring resource quota with terminating scope ignored the pod usage 07/29/23 16:32:51.561
-STEP: Deleting the pod 07/29/23 16:32:53.568
-STEP: Ensuring resource quota status released the pod usage 07/29/23 16:32:53.597
-STEP: Creating a terminating pod 07/29/23 16:32:55.604
-STEP: Ensuring resource quota with terminating scope captures the pod usage 07/29/23 16:32:55.635
-STEP: Ensuring resource quota with not terminating scope ignored the pod usage 07/29/23 16:32:57.647
-STEP: Deleting the pod 07/29/23 16:32:59.682
-STEP: Ensuring resource quota status released the pod usage 07/29/23 16:32:59.701
-[AfterEach] [sig-api-machinery] ResourceQuota
+[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+ test/e2e/common/node/downwardapi.go:166
+STEP: Creating a pod to test downward api env vars 08/24/23 12:41:24.081
+Aug 24 12:41:24.101: INFO: Waiting up to 5m0s for pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5" in namespace "downward-api-5476" to be "Succeeded or Failed"
+Aug 24 12:41:24.107: INFO: Pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.439638ms
+Aug 24 12:41:26.121: INFO: Pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019913911s
+Aug 24 12:41:28.118: INFO: Pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017262184s +STEP: Saw pod success 08/24/23 12:41:28.118 +Aug 24 12:41:28.118: INFO: Pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5" satisfied condition "Succeeded or Failed" +Aug 24 12:41:28.123: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5 container dapi-container: +STEP: delete the pod 08/24/23 12:41:28.137 +Aug 24 12:41:28.158: INFO: Waiting for pod downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5 to disappear +Aug 24 12:41:28.164: INFO: Pod downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5 no longer exists +[AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 -Jul 29 16:33:01.708: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 12:41:28.164: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-1980" for this suite. 07/29/23 16:33:01.716 +STEP: Destroying namespace "downward-api-5476" for this suite. 08/24/23 12:41:28.175 ------------------------------ -• [SLOW TEST] [16.292 seconds] -[sig-api-machinery] ResourceQuota -test/e2e/apimachinery/framework.go:23 - should verify ResourceQuota with terminating scopes. [Conformance] - test/e2e/apimachinery/resource_quota.go:690 +• [4.157 seconds] +[sig-node] Downward API +test/e2e/common/node/framework.go:23 + should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:166 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-node] Downward API set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:32:45.444 - Jul 29 16:32:45.445: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 16:32:45.448 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:32:45.476 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:32:45.481 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 12:41:24.032 + Aug 24 12:41:24.032: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 12:41:24.034 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:24.068 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:24.074 + [BeforeEach] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:31 - [It] should verify ResourceQuota with terminating scopes. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:690 - STEP: Creating a ResourceQuota with terminating scope 07/29/23 16:32:45.487 - STEP: Ensuring ResourceQuota status is calculated 07/29/23 16:32:45.495 - STEP: Creating a ResourceQuota with not terminating scope 07/29/23 16:32:47.505 - STEP: Ensuring ResourceQuota status is calculated 07/29/23 16:32:47.514 - STEP: Creating a long running pod 07/29/23 16:32:49.522 - STEP: Ensuring resource quota with not terminating scope captures the pod usage 07/29/23 16:32:49.553 - STEP: Ensuring resource quota with terminating scope ignored the pod usage 07/29/23 16:32:51.561 - STEP: Deleting the pod 07/29/23 16:32:53.568 - STEP: Ensuring resource quota status released the pod usage 07/29/23 16:32:53.597 - STEP: Creating a terminating pod 07/29/23 16:32:55.604 - STEP: Ensuring resource quota with terminating scope captures the pod usage 07/29/23 16:32:55.635 - STEP: Ensuring resource quota with not terminating scope ignored the pod usage 07/29/23 16:32:57.647 - STEP: Deleting the pod 07/29/23 16:32:59.682 - STEP: Ensuring resource quota status released the pod usage 07/29/23 16:32:59.701 - [AfterEach] [sig-api-machinery] ResourceQuota + [It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:166 + STEP: Creating a pod to test downward api env vars 08/24/23 12:41:24.081 + Aug 24 12:41:24.101: INFO: Waiting up to 5m0s for pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5" in namespace "downward-api-5476" to be "Succeeded or Failed" + Aug 24 12:41:24.107: INFO: Pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5": Phase="Pending", Reason="", readiness=false. Elapsed: 6.439638ms + Aug 24 12:41:26.121: INFO: Pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019913911s + Aug 24 12:41:28.118: INFO: Pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017262184s + STEP: Saw pod success 08/24/23 12:41:28.118 + Aug 24 12:41:28.118: INFO: Pod "downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5" satisfied condition "Succeeded or Failed" + Aug 24 12:41:28.123: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5 container dapi-container: + STEP: delete the pod 08/24/23 12:41:28.137 + Aug 24 12:41:28.158: INFO: Waiting for pod downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5 to disappear + Aug 24 12:41:28.164: INFO: Pod downward-api-876f3d5a-acd5-4768-b086-a8976b0e91a5 no longer exists + [AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 - Jul 29 16:33:01.708: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 12:41:28.164: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-1980" for this suite. 07/29/23 16:33:01.716 + STEP: Destroying namespace "downward-api-5476" for this suite. 
08/24/23 12:41:28.175 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Update Demo - should scale a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:352 -[BeforeEach] [sig-cli] Kubectl client +[sig-apps] Daemon set [Serial] + should run and stop simple daemon [Conformance] + test/e2e/apps/daemon_set.go:177 +[BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:33:01.738 -Jul 29 16:33:01.738: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:33:01.74 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:33:01.784 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:33:01.791 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 12:41:28.196 +Aug 24 12:41:28.196: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename daemonsets 08/24/23 12:41:28.197 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:28.226 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:28.231 +[BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[BeforeEach] Update Demo - test/e2e/kubectl/kubectl.go:326 -[It] should scale a replication controller [Conformance] - test/e2e/kubectl/kubectl.go:352 -STEP: creating a replication controller 07/29/23 16:33:01.805 -Jul 29 16:33:01.805: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 create -f -' -Jul 29 16:33:03.260: INFO: stderr: "" -Jul 29 16:33:03.260: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" -STEP: waiting for all containers in name=update-demo pods to come up. 07/29/23 16:33:03.26 -Jul 29 16:33:03.261: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Jul 29 16:33:03.466: INFO: stderr: "" -Jul 29 16:33:03.466: INFO: stdout: "update-demo-nautilus-6g5lr update-demo-nautilus-xq4l7 " -Jul 29 16:33:03.466: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:33:03.666: INFO: stderr: "" -Jul 29 16:33:03.666: INFO: stdout: "" -Jul 29 16:33:03.667: INFO: update-demo-nautilus-6g5lr is created but not running -Jul 29 16:33:08.668: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Jul 29 16:33:08.814: INFO: stderr: "" -Jul 29 16:33:08.814: INFO: stdout: "update-demo-nautilus-6g5lr update-demo-nautilus-xq4l7 " -Jul 29 16:33:08.814: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . 
"status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:33:08.944: INFO: stderr: "" -Jul 29 16:33:08.944: INFO: stdout: "true" -Jul 29 16:33:08.944: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Jul 29 16:33:09.109: INFO: stderr: "" -Jul 29 16:33:09.109: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" -Jul 29 16:33:09.109: INFO: validating pod update-demo-nautilus-6g5lr -Jul 29 16:33:09.124: INFO: got data: { - "image": "nautilus.jpg" -} - -Jul 29 16:33:09.124: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Jul 29 16:33:09.125: INFO: update-demo-nautilus-6g5lr is verified up and running -Jul 29 16:33:09.125: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-xq4l7 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:33:09.250: INFO: stderr: "" -Jul 29 16:33:09.250: INFO: stdout: "true" -Jul 29 16:33:09.250: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-xq4l7 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Jul 29 16:33:09.380: INFO: stderr: "" -Jul 29 16:33:09.380: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" -Jul 29 16:33:09.380: INFO: validating pod update-demo-nautilus-xq4l7 -Jul 29 16:33:09.398: INFO: got data: { - "image": "nautilus.jpg" -} - -Jul 29 16:33:09.398: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Jul 29 16:33:09.398: INFO: update-demo-nautilus-xq4l7 is verified up and running -STEP: scaling down the replication controller 07/29/23 16:33:09.398 -Jul 29 16:33:09.414: INFO: scanned /root for discovery docs: -Jul 29 16:33:09.414: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 scale rc update-demo-nautilus --replicas=1 --timeout=5m' -Jul 29 16:33:10.610: INFO: stderr: "" -Jul 29 16:33:10.610: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" -STEP: waiting for all containers in name=update-demo pods to come up. 07/29/23 16:33:10.61 -Jul 29 16:33:10.610: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Jul 29 16:33:10.733: INFO: stderr: "" -Jul 29 16:33:10.733: INFO: stdout: "update-demo-nautilus-6g5lr " -Jul 29 16:33:10.734: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:33:10.865: INFO: stderr: "" -Jul 29 16:33:10.865: INFO: stdout: "true" -Jul 29 16:33:10.865: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Jul 29 16:33:11.037: INFO: stderr: "" -Jul 29 16:33:11.037: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" -Jul 29 16:33:11.037: INFO: validating pod update-demo-nautilus-6g5lr -Jul 29 16:33:11.047: INFO: got data: { - "image": "nautilus.jpg" -} - -Jul 29 16:33:11.048: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Jul 29 16:33:11.048: INFO: update-demo-nautilus-6g5lr is verified up and running -STEP: scaling up the replication controller 07/29/23 16:33:11.048 -Jul 29 16:33:11.060: INFO: scanned /root for discovery docs: -Jul 29 16:33:11.060: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 scale rc update-demo-nautilus --replicas=2 --timeout=5m' -Jul 29 16:33:12.253: INFO: stderr: "" -Jul 29 16:33:12.253: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" -STEP: waiting for all containers in name=update-demo pods to come up. 07/29/23 16:33:12.253 -Jul 29 16:33:12.254: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Jul 29 16:33:12.404: INFO: stderr: "" -Jul 29 16:33:12.404: INFO: stdout: "update-demo-nautilus-6g5lr update-demo-nautilus-t46pp " -Jul 29 16:33:12.405: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:33:12.542: INFO: stderr: "" -Jul 29 16:33:12.542: INFO: stdout: "true" -Jul 29 16:33:12.542: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Jul 29 16:33:12.682: INFO: stderr: "" -Jul 29 16:33:12.682: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" -Jul 29 16:33:12.682: INFO: validating pod update-demo-nautilus-6g5lr -Jul 29 16:33:12.691: INFO: got data: { - "image": "nautilus.jpg" -} - -Jul 29 16:33:12.691: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . -Jul 29 16:33:12.691: INFO: update-demo-nautilus-6g5lr is verified up and running -Jul 29 16:33:12.691: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-t46pp -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:33:12.831: INFO: stderr: "" -Jul 29 16:33:12.831: INFO: stdout: "" -Jul 29 16:33:12.831: INFO: update-demo-nautilus-t46pp is created but not running -Jul 29 16:33:17.833: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' -Jul 29 16:33:17.965: INFO: stderr: "" -Jul 29 16:33:17.965: INFO: stdout: "update-demo-nautilus-6g5lr update-demo-nautilus-t46pp " -Jul 29 16:33:17.966: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' -Jul 29 16:33:18.099: INFO: stderr: "" -Jul 29 16:33:18.099: INFO: stdout: "true" -Jul 29 16:33:18.100: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' -Jul 29 16:33:18.230: INFO: stderr: "" -Jul 29 16:33:18.230: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7" -Jul 29 16:33:18.231: INFO: validating pod update-demo-nautilus-6g5lr -Jul 29 16:33:18.239: INFO: got data: { - "image": "nautilus.jpg" -} +[BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 +[It] should run and stop simple daemon [Conformance] + test/e2e/apps/daemon_set.go:177 +STEP: Creating simple DaemonSet "daemon-set" 08/24/23 12:41:28.268 +STEP: Check that daemon pods launch on every node of the cluster. 08/24/23 12:41:28.278 +Aug 24 12:41:28.291: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:41:28.291: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:41:29.320: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:41:29.320: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:41:30.309: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 +Aug 24 12:41:30.309: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set +STEP: Stop a daemon pod, check that the daemon pod is revived. 
08/24/23 12:41:30.316
+Aug 24 12:41:30.358: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+Aug 24 12:41:30.358: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+Aug 24 12:41:31.378: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+Aug 24 12:41:31.378: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+Aug 24 12:41:32.383: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+Aug 24 12:41:32.383: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+Aug 24 12:41:33.381: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+Aug 24 12:41:33.381: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+Aug 24 12:41:34.383: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+Aug 24 12:41:34.383: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+Aug 24 12:41:35.375: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
+Aug 24 12:41:35.375: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
+[AfterEach] [sig-apps] Daemon set [Serial]
+ test/e2e/apps/daemon_set.go:122
+STEP: Deleting DaemonSet "daemon-set" 08/24/23 12:41:35.382
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-6175, will wait for the garbage collector to delete the pods 08/24/23 12:41:35.383
+Aug 24 12:41:35.478: INFO: Deleting DaemonSet.extensions daemon-set took: 39.074686ms
+Aug 24 12:41:35.679: INFO: Terminating DaemonSet.extensions daemon-set pods took: 200.937044ms
+Aug 24 12:41:37.786: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+Aug 24 12:41:37.786: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set
+Aug 24 12:41:37.791: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"23889"},"items":null}

-Jul 29 16:33:18.239: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
-Jul 29 16:33:18.239: INFO: update-demo-nautilus-6g5lr is verified up and running
-Jul 29 16:33:18.239: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-t46pp -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
-Jul 29 16:33:18.434: INFO: stderr: ""
-Jul 29 16:33:18.434: INFO: stdout: "true"
-Jul 29 16:33:18.434: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-t46pp -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
-Jul 29 16:33:18.578: INFO: stderr: ""
-Jul 29 16:33:18.578: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7"
-Jul 29 16:33:18.578: INFO: validating pod update-demo-nautilus-t46pp
-Jul 29 16:33:18.594: INFO: got data: {
- "image": "nautilus.jpg"
-}

+Aug 24 12:41:37.798: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"23890"},"items":null}

-Jul 29 16:33:18.594: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . 
-Jul 29 16:33:18.594: INFO: update-demo-nautilus-t46pp is verified up and running
-STEP: using delete to clean up resources 07/29/23 16:33:18.594
-Jul 29 16:33:18.595: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 delete --grace-period=0 --force -f -'
-Jul 29 16:33:18.736: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
-Jul 29 16:33:18.736: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
-Jul 29 16:33:18.736: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get rc,svc -l name=update-demo --no-headers'
-Jul 29 16:33:18.958: INFO: stderr: "No resources found in kubectl-6391 namespace.\n"
-Jul 29 16:33:18.958: INFO: stdout: ""
-Jul 29 16:33:18.959: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
-Jul 29 16:33:19.183: INFO: stderr: ""
-Jul 29 16:33:19.183: INFO: stdout: ""
-[AfterEach] [sig-cli] Kubectl client
+[AfterEach] [sig-apps] Daemon set [Serial]
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:33:19.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-cli] Kubectl client
+Aug 24 12:41:37.823: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-cli] Kubectl client
+[DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-cli] Kubectl client
+[DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
 tear down framework | framework.go:193
-STEP: Destroying namespace "kubectl-6391" for this suite. 07/29/23 16:33:19.196
+STEP: Destroying namespace "daemonsets-6175" for this suite. 08/24/23 12:41:37.837
------------------------------
-• [SLOW TEST] [17.474 seconds]
-[sig-cli] Kubectl client
-test/e2e/kubectl/framework.go:23
- Update Demo
- test/e2e/kubectl/kubectl.go:324
- should scale a replication controller [Conformance]
- test/e2e/kubectl/kubectl.go:352
+• [SLOW TEST] [9.654 seconds]
+[sig-apps] Daemon set [Serial]
+test/e2e/apps/framework.go:23
+ should run and stop simple daemon [Conformance]
+ test/e2e/apps/daemon_set.go:177

Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-cli] Kubectl client
+ [BeforeEach] [sig-apps] Daemon set [Serial]
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:33:01.738
- Jul 29 16:33:01.738: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename kubectl 07/29/23 16:33:01.74
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:33:01.784
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:33:01.791
- [BeforeEach] [sig-cli] Kubectl client
+ STEP: Creating a kubernetes client 08/24/23 12:41:28.196
+ Aug 24 12:41:28.196: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename daemonsets 08/24/23 12:41:28.197
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:28.226
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:28.231
+ [BeforeEach] [sig-apps] Daemon set [Serial]
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-cli] Kubectl client
- test/e2e/kubectl/kubectl.go:274
- [BeforeEach] Update Demo
- test/e2e/kubectl/kubectl.go:326
- [It] should scale a replication controller [Conformance]
- test/e2e/kubectl/kubectl.go:352
- STEP: creating a replication controller 07/29/23 16:33:01.805
- Jul 29 16:33:01.805: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 create -f -'
- Jul 29 16:33:03.260: INFO: stderr: ""
- Jul 29 16:33:03.260: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
- STEP: waiting for all containers in name=update-demo pods to come up. 07/29/23 16:33:03.26
- Jul 29 16:33:03.261: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
- Jul 29 16:33:03.466: INFO: stderr: ""
- Jul 29 16:33:03.466: INFO: stdout: "update-demo-nautilus-6g5lr update-demo-nautilus-xq4l7 "
- Jul 29 16:33:03.466: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
- Jul 29 16:33:03.666: INFO: stderr: ""
- Jul 29 16:33:03.666: INFO: stdout: ""
- Jul 29 16:33:03.667: INFO: update-demo-nautilus-6g5lr is created but not running
- Jul 29 16:33:08.668: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
- Jul 29 16:33:08.814: INFO: stderr: ""
- Jul 29 16:33:08.814: INFO: stdout: "update-demo-nautilus-6g5lr update-demo-nautilus-xq4l7 "
- Jul 29 16:33:08.814: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
- Jul 29 16:33:08.944: INFO: stderr: ""
- Jul 29 16:33:08.944: INFO: stdout: "true"
- Jul 29 16:33:08.944: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
- Jul 29 16:33:09.109: INFO: stderr: ""
- Jul 29 16:33:09.109: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7"
- Jul 29 16:33:09.109: INFO: validating pod update-demo-nautilus-6g5lr
- Jul 29 16:33:09.124: INFO: got data: {
- "image": "nautilus.jpg"
- }
- 
- Jul 29 16:33:09.124: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
- Jul 29 16:33:09.125: INFO: update-demo-nautilus-6g5lr is verified up and running
- Jul 29 16:33:09.125: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-xq4l7 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
- Jul 29 16:33:09.250: INFO: stderr: ""
- Jul 29 16:33:09.250: INFO: stdout: "true"
- Jul 29 16:33:09.250: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-xq4l7 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
- Jul 29 16:33:09.380: INFO: stderr: ""
- Jul 29 16:33:09.380: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7"
- Jul 29 16:33:09.380: INFO: validating pod update-demo-nautilus-xq4l7
- Jul 29 16:33:09.398: INFO: got data: {
- "image": "nautilus.jpg"
- }
- 
- Jul 29 16:33:09.398: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
- Jul 29 16:33:09.398: INFO: update-demo-nautilus-xq4l7 is verified up and running
- STEP: scaling down the replication controller 07/29/23 16:33:09.398
- Jul 29 16:33:09.414: INFO: scanned /root for discovery docs:
- Jul 29 16:33:09.414: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 scale rc update-demo-nautilus --replicas=1 --timeout=5m'
- Jul 29 16:33:10.610: INFO: stderr: ""
- Jul 29 16:33:10.610: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
- STEP: waiting for all containers in name=update-demo pods to come up. 07/29/23 16:33:10.61
- Jul 29 16:33:10.610: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
- Jul 29 16:33:10.733: INFO: stderr: ""
- Jul 29 16:33:10.733: INFO: stdout: "update-demo-nautilus-6g5lr "
- Jul 29 16:33:10.734: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
- Jul 29 16:33:10.865: INFO: stderr: ""
- Jul 29 16:33:10.865: INFO: stdout: "true"
- Jul 29 16:33:10.865: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
- Jul 29 16:33:11.037: INFO: stderr: ""
- Jul 29 16:33:11.037: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7"
- Jul 29 16:33:11.037: INFO: validating pod update-demo-nautilus-6g5lr
- Jul 29 16:33:11.047: INFO: got data: {
- "image": "nautilus.jpg"
- }
- 
- Jul 29 16:33:11.048: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
- Jul 29 16:33:11.048: INFO: update-demo-nautilus-6g5lr is verified up and running
- STEP: scaling up the replication controller 07/29/23 16:33:11.048
- Jul 29 16:33:11.060: INFO: scanned /root for discovery docs:
- Jul 29 16:33:11.060: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 scale rc update-demo-nautilus --replicas=2 --timeout=5m'
- Jul 29 16:33:12.253: INFO: stderr: ""
- Jul 29 16:33:12.253: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
- STEP: waiting for all containers in name=update-demo pods to come up. 07/29/23 16:33:12.253
- Jul 29 16:33:12.254: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
- Jul 29 16:33:12.404: INFO: stderr: ""
- Jul 29 16:33:12.404: INFO: stdout: "update-demo-nautilus-6g5lr update-demo-nautilus-t46pp "
- Jul 29 16:33:12.405: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
- Jul 29 16:33:12.542: INFO: stderr: ""
- Jul 29 16:33:12.542: INFO: stdout: "true"
- Jul 29 16:33:12.542: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
- Jul 29 16:33:12.682: INFO: stderr: ""
- Jul 29 16:33:12.682: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7"
- Jul 29 16:33:12.682: INFO: validating pod update-demo-nautilus-6g5lr
- Jul 29 16:33:12.691: INFO: got data: {
- "image": "nautilus.jpg"
- }
- 
- Jul 29 16:33:12.691: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
- Jul 29 16:33:12.691: INFO: update-demo-nautilus-6g5lr is verified up and running
- Jul 29 16:33:12.691: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-t46pp -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
- Jul 29 16:33:12.831: INFO: stderr: ""
- Jul 29 16:33:12.831: INFO: stdout: ""
- Jul 29 16:33:12.831: INFO: update-demo-nautilus-t46pp is created but not running
- Jul 29 16:33:17.833: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo'
- Jul 29 16:33:17.965: INFO: stderr: ""
- Jul 29 16:33:17.965: INFO: stdout: "update-demo-nautilus-6g5lr update-demo-nautilus-t46pp "
- Jul 29 16:33:17.966: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
- Jul 29 16:33:18.099: INFO: stderr: ""
- Jul 29 16:33:18.099: INFO: stdout: "true"
- Jul 29 16:33:18.100: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-6g5lr -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
- Jul 29 16:33:18.230: INFO: stderr: ""
- Jul 29 16:33:18.230: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7"
- Jul 29 16:33:18.231: INFO: validating pod update-demo-nautilus-6g5lr
- Jul 29 16:33:18.239: INFO: got data: {
- "image": "nautilus.jpg"
- }
+ [BeforeEach] [sig-apps] Daemon set [Serial]
+ test/e2e/apps/daemon_set.go:157
+ [It] should run and stop simple daemon [Conformance]
+ test/e2e/apps/daemon_set.go:177
+ STEP: Creating simple DaemonSet "daemon-set" 08/24/23 12:41:28.268
+ STEP: Check that daemon pods launch on every node of the cluster. 08/24/23 12:41:28.278
+ Aug 24 12:41:28.291: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+ Aug 24 12:41:28.291: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1
+ Aug 24 12:41:29.320: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+ Aug 24 12:41:29.320: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1
+ Aug 24 12:41:30.309: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
+ Aug 24 12:41:30.309: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
+ STEP: Stop a daemon pod, check that the daemon pod is revived. 08/24/23 12:41:30.316
+ Aug 24 12:41:30.358: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+ Aug 24 12:41:30.358: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+ Aug 24 12:41:31.378: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+ Aug 24 12:41:31.378: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+ Aug 24 12:41:32.383: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+ Aug 24 12:41:32.383: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+ Aug 24 12:41:33.381: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+ Aug 24 12:41:33.381: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+ Aug 24 12:41:34.383: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+ Aug 24 12:41:34.383: INFO: Node pe9deep4seen-2 is running 0 daemon pod, expected 1
+ Aug 24 12:41:35.375: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
+ Aug 24 12:41:35.375: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
+ [AfterEach] [sig-apps] Daemon set [Serial]
+ test/e2e/apps/daemon_set.go:122
+ STEP: Deleting DaemonSet "daemon-set" 08/24/23 12:41:35.382
+ STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-6175, will wait for the garbage collector to delete the pods 08/24/23 12:41:35.383
+ Aug 24 12:41:35.478: INFO: Deleting DaemonSet.extensions daemon-set took: 39.074686ms
+ Aug 24 12:41:35.679: INFO: Terminating DaemonSet.extensions daemon-set pods took: 200.937044ms
+ Aug 24 12:41:37.786: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+ Aug 24 12:41:37.786: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set
+ Aug 24 12:41:37.791: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"23889"},"items":null}
- Jul 29 16:33:18.239: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
- Jul 29 16:33:18.239: INFO: update-demo-nautilus-6g5lr is verified up and running
- Jul 29 16:33:18.239: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-t46pp -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}'
- Jul 29 16:33:18.434: INFO: stderr: ""
- Jul 29 16:33:18.434: INFO: stdout: "true"
- Jul 29 16:33:18.434: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods update-demo-nautilus-t46pp -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}'
- Jul 29 16:33:18.578: INFO: stderr: ""
- Jul 29 16:33:18.578: INFO: stdout: "registry.k8s.io/e2e-test-images/nautilus:1.7"
- Jul 29 16:33:18.578: INFO: validating pod update-demo-nautilus-t46pp
- Jul 29 16:33:18.594: INFO: got data: {
- "image": "nautilus.jpg"
- }
+ Aug 24 12:41:37.798: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"23890"},"items":null}
- Jul 29 16:33:18.594: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
- Jul 29 16:33:18.594: INFO: update-demo-nautilus-t46pp is verified up and running
- STEP: using delete to clean up resources 07/29/23 16:33:18.594
- Jul 29 16:33:18.595: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 delete --grace-period=0 --force -f -'
- Jul 29 16:33:18.736: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
- Jul 29 16:33:18.736: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
- Jul 29 16:33:18.736: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get rc,svc -l name=update-demo --no-headers'
- Jul 29 16:33:18.958: INFO: stderr: "No resources found in kubectl-6391 namespace.\n"
- Jul 29 16:33:18.958: INFO: stdout: ""
- Jul 29 16:33:18.959: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-6391 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
- Jul 29 16:33:19.183: INFO: stderr: ""
- Jul 29 16:33:19.183: INFO: stdout: ""
- [AfterEach] [sig-cli] Kubectl client
+ [AfterEach] [sig-apps] Daemon set [Serial]
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:33:19.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-cli] Kubectl client
+ Aug 24 12:41:37.823: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-cli] Kubectl client
+ [DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-cli] Kubectl client
+ [DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
 tear down framework | framework.go:193
- STEP: Destroying namespace "kubectl-6391" for this suite. 07/29/23 16:33:19.196
+ STEP: Destroying namespace "daemonsets-6175" for this suite. 08/24/23 12:41:37.837
<< End Captured GinkgoWriter Output
------------------------------
-SSS
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
-[sig-node] Security Context When creating a container with runAsUser
- should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/node/security_context.go:347
+[sig-node] Security Context
+ should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]
+ test/e2e/node/security_context.go:164
[BeforeEach] [sig-node] Security Context
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:33:19.213
-Jul 29 16:33:19.213: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename security-context-test 07/29/23 16:33:19.218
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:33:19.245
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:33:19.249
+STEP: Creating a kubernetes client 08/24/23 12:41:37.862
+Aug 24 12:41:37.862: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename security-context 08/24/23 12:41:37.864
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:37.899
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:37.905
[BeforeEach] [sig-node] Security Context
 test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-node] Security Context
- test/e2e/common/node/security_context.go:50
-[It] should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/node/security_context.go:347
-Jul 29 16:33:19.266: INFO: Waiting up to 5m0s for pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480" in namespace "security-context-test-166" to be "Succeeded or Failed"
-Jul 29 16:33:19.274: INFO: Pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480": Phase="Pending", Reason="", readiness=false. Elapsed: 7.798135ms
-Jul 29 16:33:21.289: INFO: Pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022933364s
-Jul 29 16:33:23.282: INFO: Pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016368438s
-Jul 29 16:33:23.283: INFO: Pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480" satisfied condition "Succeeded or Failed"
+[It] should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]
+ test/e2e/node/security_context.go:164
+STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser 08/24/23 12:41:37.91
+Aug 24 12:41:37.926: INFO: Waiting up to 5m0s for pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb" in namespace "security-context-4176" to be "Succeeded or Failed"
+Aug 24 12:41:37.933: INFO: Pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb": Phase="Pending", Reason="", readiness=false. Elapsed: 6.273115ms
+Aug 24 12:41:39.943: INFO: Pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01674136s
+Aug 24 12:41:41.940: INFO: Pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013219589s
+STEP: Saw pod success 08/24/23 12:41:41.94
+Aug 24 12:41:41.940: INFO: Pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb" satisfied condition "Succeeded or Failed"
+Aug 24 12:41:41.946: INFO: Trying to get logs from node pe9deep4seen-3 pod security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb container test-container:
+STEP: delete the pod 08/24/23 12:41:41.959
+Aug 24 12:41:41.981: INFO: Waiting for pod security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb to disappear
+Aug 24 12:41:41.986: INFO: Pod security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb no longer exists
[AfterEach] [sig-node] Security Context
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:33:23.283: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+Aug 24 12:41:41.987: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[DeferCleanup (Each)] [sig-node] Security Context
 test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-node] Security Context
 dump namespaces | framework.go:196
[DeferCleanup (Each)] [sig-node] Security Context
 tear down framework | framework.go:193
-STEP: Destroying namespace "security-context-test-166" for this suite. 07/29/23 16:33:23.293
+STEP: Destroying namespace "security-context-4176" for this suite. 08/24/23 12:41:41.996
------------------------------
-• [4.097 seconds]
+• [4.145 seconds]
[sig-node] Security Context
-test/e2e/common/node/framework.go:23
- When creating a container with runAsUser
- test/e2e/common/node/security_context.go:309
- should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/node/security_context.go:347
+test/e2e/node/framework.go:23
+ should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]
+ test/e2e/node/security_context.go:164

Begin Captured GinkgoWriter Output >>
 [BeforeEach] [sig-node] Security Context
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:33:19.213
- Jul 29 16:33:19.213: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename security-context-test 07/29/23 16:33:19.218
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:33:19.245
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:33:19.249
+ STEP: Creating a kubernetes client 08/24/23 12:41:37.862
+ Aug 24 12:41:37.862: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename security-context 08/24/23 12:41:37.864
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:37.899
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:37.905
 [BeforeEach] [sig-node] Security Context
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-node] Security Context
- test/e2e/common/node/security_context.go:50
- [It] should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/node/security_context.go:347
- Jul 29 16:33:19.266: INFO: Waiting up to 5m0s for pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480" in namespace "security-context-test-166" to be "Succeeded or Failed"
- Jul 29 16:33:19.274: INFO: Pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480": Phase="Pending", Reason="", readiness=false. Elapsed: 7.798135ms
- Jul 29 16:33:21.289: INFO: Pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022933364s
- Jul 29 16:33:23.282: INFO: Pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016368438s
- Jul 29 16:33:23.283: INFO: Pod "busybox-user-65534-4b83a8bf-0a04-4a22-8cbc-b144b7769480" satisfied condition "Succeeded or Failed"
+ [It] should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]
+ test/e2e/node/security_context.go:164
+ STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser 08/24/23 12:41:37.91
+ Aug 24 12:41:37.926: INFO: Waiting up to 5m0s for pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb" in namespace "security-context-4176" to be "Succeeded or Failed"
+ Aug 24 12:41:37.933: INFO: Pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb": Phase="Pending", Reason="", readiness=false. Elapsed: 6.273115ms
+ Aug 24 12:41:39.943: INFO: Pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01674136s
+ Aug 24 12:41:41.940: INFO: Pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013219589s
+ STEP: Saw pod success 08/24/23 12:41:41.94
+ Aug 24 12:41:41.940: INFO: Pod "security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb" satisfied condition "Succeeded or Failed"
+ Aug 24 12:41:41.946: INFO: Trying to get logs from node pe9deep4seen-3 pod security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb container test-container:
+ STEP: delete the pod 08/24/23 12:41:41.959
+ Aug 24 12:41:41.981: INFO: Waiting for pod security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb to disappear
+ Aug 24 12:41:41.986: INFO: Pod security-context-7b0e1685-ce31-4540-82f6-cb179ee64adb no longer exists
 [AfterEach] [sig-node] Security Context
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:33:23.283: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ Aug 24 12:41:41.987: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
 [DeferCleanup (Each)] [sig-node] Security Context
 test/e2e/framework/metrics/init/init.go:33
 [DeferCleanup (Each)] [sig-node] Security Context
 dump namespaces | framework.go:196
 [DeferCleanup (Each)] [sig-node] Security Context
 tear down framework | framework.go:193
- STEP: Destroying namespace "security-context-test-166" for this suite. 07/29/23 16:33:23.293
- << End Captured GinkgoWriter Output
-------------------------------
-SS
-------------------------------
-[sig-apps] CronJob
- should schedule multiple jobs concurrently [Conformance]
- test/e2e/apps/cronjob.go:69
-[BeforeEach] [sig-apps] CronJob
- set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:33:23.311
-Jul 29 16:33:23.311: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename cronjob 07/29/23 16:33:23.314
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:33:23.345
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:33:23.35
-[BeforeEach] [sig-apps] CronJob
- test/e2e/framework/metrics/init/init.go:31
-[It] should schedule multiple jobs concurrently [Conformance]
- test/e2e/apps/cronjob.go:69
-STEP: Creating a cronjob 07/29/23 16:33:23.353
-STEP: Ensuring more than one job is running at a time 07/29/23 16:33:23.363
-STEP: Ensuring at least two running jobs exists by listing jobs explicitly 07/29/23 16:35:01.373
-STEP: Removing cronjob 07/29/23 16:35:01.385
-[AfterEach] [sig-apps] CronJob
- test/e2e/framework/node/init/init.go:32
-Jul 29 16:35:01.408: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-apps] CronJob
- test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-apps] CronJob
- dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-apps] CronJob
- tear down framework | framework.go:193
-STEP: Destroying namespace "cronjob-6237" for this suite. 07/29/23 16:35:01.423
-------------------------------
-• [SLOW TEST] [98.131 seconds]
-[sig-apps] CronJob
-test/e2e/apps/framework.go:23
- should schedule multiple jobs concurrently [Conformance]
- test/e2e/apps/cronjob.go:69
-
- Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-apps] CronJob
- set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:33:23.311
- Jul 29 16:33:23.311: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename cronjob 07/29/23 16:33:23.314
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:33:23.345
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:33:23.35
- [BeforeEach] [sig-apps] CronJob
- test/e2e/framework/metrics/init/init.go:31
- [It] should schedule multiple jobs concurrently [Conformance]
- test/e2e/apps/cronjob.go:69
- STEP: Creating a cronjob 07/29/23 16:33:23.353
- STEP: Ensuring more than one job is running at a time 07/29/23 16:33:23.363
- STEP: Ensuring at least two running jobs exists by listing jobs explicitly 07/29/23 16:35:01.373
- STEP: Removing cronjob 07/29/23 16:35:01.385
- [AfterEach] [sig-apps] CronJob
- test/e2e/framework/node/init/init.go:32
- Jul 29 16:35:01.408: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-apps] CronJob
- test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-apps] CronJob
- dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-apps] CronJob
- tear down framework | framework.go:193
- STEP: Destroying namespace "cronjob-6237" for this suite. 07/29/23 16:35:01.423
+ STEP: Destroying namespace "security-context-4176" for this suite. 08/24/23 12:41:41.996
<< End Captured GinkgoWriter Output
------------------------------
-SSSSS
+SSSSSS
------------------------------
-[sig-cli] Kubectl client Kubectl server-side dry-run
- should check if kubectl can dry-run update Pods [Conformance]
- test/e2e/kubectl/kubectl.go:962
+[sig-cli] Kubectl client Kubectl run pod
+ should create a pod from an image when restart is Never [Conformance]
+ test/e2e/kubectl/kubectl.go:1713
[BeforeEach] [sig-cli] Kubectl client
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:35:01.448
-Jul 29 16:35:01.448: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename kubectl 07/29/23 16:35:01.454
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:01.516
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:01.538
+STEP: Creating a kubernetes client 08/24/23 12:41:42.008
+Aug 24 12:41:42.008: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename kubectl 08/24/23 12:41:42.01
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:42.04
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:42.044
[BeforeEach] [sig-cli] Kubectl client
 test/e2e/framework/metrics/init/init.go:31
[BeforeEach] [sig-cli] Kubectl client
 test/e2e/kubectl/kubectl.go:274
-[It] should check if kubectl can dry-run update Pods [Conformance]
- test/e2e/kubectl/kubectl.go:962
-STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 07/29/23 16:35:01.561
-Jul 29 16:35:01.566: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-8901 run e2e-test-httpd-pod --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod'
-Jul 29 16:35:01.715: INFO: stderr: ""
-Jul 29 16:35:01.715: INFO: stdout: "pod/e2e-test-httpd-pod created\n"
-STEP: replace the image in the pod with server-side dry-run 07/29/23 16:35:01.715
-Jul 29 16:35:01.716: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-8901 patch pod e2e-test-httpd-pod -p {"spec":{"containers":[{"name": "e2e-test-httpd-pod","image": "registry.k8s.io/e2e-test-images/busybox:1.29-4"}]}} --dry-run=server'
-Jul 29 16:35:02.225: INFO: stderr: ""
-Jul 29 16:35:02.225: INFO: stdout: "pod/e2e-test-httpd-pod patched\n"
-STEP: verifying the pod e2e-test-httpd-pod has the right image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 07/29/23 16:35:02.225
-Jul 29 16:35:02.237: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-8901 delete pods e2e-test-httpd-pod'
-Jul 29 16:35:04.653: INFO: stderr: ""
-Jul 29 16:35:04.653: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n"
+[BeforeEach] Kubectl run pod
+ test/e2e/kubectl/kubectl.go:1700
+[It] should create a pod from an image when restart is Never [Conformance]
+ test/e2e/kubectl/kubectl.go:1713
+STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 08/24/23 12:41:42.051
+Aug 24 12:41:42.051: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-308 run e2e-test-httpd-pod --restart=Never --pod-running-timeout=2m0s --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4'
+Aug 24 12:41:42.272: INFO: stderr: ""
+Aug 24 12:41:42.272: INFO: stdout: "pod/e2e-test-httpd-pod created\n"
+STEP: verifying the pod e2e-test-httpd-pod was created 08/24/23 12:41:42.272
+[AfterEach] Kubectl run pod
+ test/e2e/kubectl/kubectl.go:1704
+Aug 24 12:41:42.293: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-308 delete pods e2e-test-httpd-pod'
+Aug 24 12:41:44.997: INFO: stderr: ""
+Aug 24 12:41:44.997: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n"
[AfterEach] [sig-cli] Kubectl client
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:35:04.653: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+Aug 24 12:41:44.997: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[DeferCleanup (Each)] [sig-cli] Kubectl client
 test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-cli] Kubectl client
 dump namespaces | framework.go:196
[DeferCleanup (Each)] [sig-cli] Kubectl client
 tear down framework | framework.go:193
-STEP: Destroying namespace "kubectl-8901" for this suite. 07/29/23 16:35:04.665
+STEP: Destroying namespace "kubectl-308" for this suite. 08/24/23 12:41:45.007
------------------------------
-• [3.233 seconds]
+• [3.010 seconds]
[sig-cli] Kubectl client
test/e2e/kubectl/framework.go:23
- Kubectl server-side dry-run
- test/e2e/kubectl/kubectl.go:956
- should check if kubectl can dry-run update Pods [Conformance]
- test/e2e/kubectl/kubectl.go:962
+ Kubectl run pod
+ test/e2e/kubectl/kubectl.go:1697
+ should create a pod from an image when restart is Never [Conformance]
+ test/e2e/kubectl/kubectl.go:1713

Begin Captured GinkgoWriter Output >>
 [BeforeEach] [sig-cli] Kubectl client
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:35:01.448
- Jul 29 16:35:01.448: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename kubectl 07/29/23 16:35:01.454
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:01.516
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:01.538
+ STEP: Creating a kubernetes client 08/24/23 12:41:42.008
+ Aug 24 12:41:42.008: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename kubectl 08/24/23 12:41:42.01
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:42.04
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:42.044
 [BeforeEach] [sig-cli] Kubectl client
 test/e2e/framework/metrics/init/init.go:31
 [BeforeEach] [sig-cli] Kubectl client
 test/e2e/kubectl/kubectl.go:274
- [It] should check if kubectl can dry-run update Pods [Conformance]
- test/e2e/kubectl/kubectl.go:962
- STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 07/29/23 16:35:01.561
- Jul 29 16:35:01.566: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-8901 run e2e-test-httpd-pod --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod'
- Jul 29 16:35:01.715: INFO: stderr: ""
- Jul 29 16:35:01.715: INFO: stdout: "pod/e2e-test-httpd-pod created\n"
- STEP: replace the image in the pod with server-side dry-run 07/29/23 16:35:01.715
- Jul 29 16:35:01.716: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-8901 patch pod e2e-test-httpd-pod -p {"spec":{"containers":[{"name": "e2e-test-httpd-pod","image": "registry.k8s.io/e2e-test-images/busybox:1.29-4"}]}} --dry-run=server'
- Jul 29 16:35:02.225: INFO: stderr: ""
- Jul 29 16:35:02.225: INFO: stdout: "pod/e2e-test-httpd-pod patched\n"
- STEP: verifying the pod e2e-test-httpd-pod has the right image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 07/29/23 16:35:02.225
- Jul 29 16:35:02.237: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-8901 delete pods e2e-test-httpd-pod'
- Jul 29 16:35:04.653: INFO: stderr: ""
- Jul 29 16:35:04.653: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n"
+ [BeforeEach] Kubectl run pod
+ test/e2e/kubectl/kubectl.go:1700
+ [It] should create a pod from an image when restart is Never [Conformance]
+ test/e2e/kubectl/kubectl.go:1713
+ STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 08/24/23 12:41:42.051
+ Aug 24 12:41:42.051: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-308 run e2e-test-httpd-pod --restart=Never --pod-running-timeout=2m0s --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4'
+ Aug 24 12:41:42.272: INFO: stderr: ""
+ Aug 24 12:41:42.272: INFO: stdout: "pod/e2e-test-httpd-pod created\n"
+ STEP: verifying the pod e2e-test-httpd-pod was created 08/24/23 12:41:42.272
+ [AfterEach] Kubectl run pod
+ test/e2e/kubectl/kubectl.go:1704
+ Aug 24 12:41:42.293: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-308 delete pods e2e-test-httpd-pod'
+ Aug 24 12:41:44.997: INFO: stderr: ""
+ Aug 24 12:41:44.997: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n"
 [AfterEach] [sig-cli] Kubectl client
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:35:04.653: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ Aug 24 12:41:44.997: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
 [DeferCleanup (Each)] [sig-cli] Kubectl client
 test/e2e/framework/metrics/init/init.go:33
 [DeferCleanup (Each)] [sig-cli] Kubectl client
 dump namespaces | framework.go:196
 [DeferCleanup (Each)] [sig-cli] Kubectl client
 tear down framework | framework.go:193
- STEP: Destroying namespace "kubectl-8901" for this suite. 07/29/23 16:35:04.665
+ STEP: Destroying namespace "kubectl-308" for this suite. 08/24/23 12:41:45.007
<< End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSS
+SSSSSSS
------------------------------
-[sig-storage] EmptyDir volumes
- should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/empty_dir.go:167
-[BeforeEach] [sig-storage] EmptyDir volumes
+[sig-storage] Subpath Atomic writer volumes
+ should support subpaths with configmap pod [Conformance]
+ test/e2e/storage/subpath.go:70
+[BeforeEach] [sig-storage] Subpath
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:35:04.682
-Jul 29 16:35:04.682: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename emptydir 07/29/23 16:35:04.685
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:04.715
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:04.72
-[BeforeEach] [sig-storage] EmptyDir volumes
+STEP: Creating a kubernetes client 08/24/23 12:41:45.019
+Aug 24 12:41:45.019: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename subpath 08/24/23 12:41:45.023
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:45.059
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:45.065
+[BeforeEach] [sig-storage] Subpath
 test/e2e/framework/metrics/init/init.go:31
-[It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/empty_dir.go:167
-STEP: Creating a pod to test emptydir 0644 on node default medium 07/29/23 16:35:04.726
-Jul 29 16:35:04.746: INFO: Waiting up to 5m0s for pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb" in namespace "emptydir-8832" to be "Succeeded or Failed"
-Jul 29 16:35:04.753: INFO: Pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb": Phase="Pending", Reason="", readiness=false. Elapsed: 6.32529ms
-Jul 29 16:35:06.760: INFO: Pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013911248s
-Jul 29 16:35:08.762: INFO: Pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015692955s
-STEP: Saw pod success 07/29/23 16:35:08.762
-Jul 29 16:35:08.763: INFO: Pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb" satisfied condition "Succeeded or Failed"
-Jul 29 16:35:08.768: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb container test-container:
-STEP: delete the pod 07/29/23 16:35:08.8
-Jul 29 16:35:08.825: INFO: Waiting for pod pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb to disappear
-Jul 29 16:35:08.831: INFO: Pod pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb no longer exists
-[AfterEach] [sig-storage] EmptyDir volumes
+[BeforeEach] Atomic writer volumes
+ test/e2e/storage/subpath.go:40
+STEP: Setting up data 08/24/23 12:41:45.072
+[It] should support subpaths with configmap pod [Conformance]
+ test/e2e/storage/subpath.go:70
+STEP: Creating pod pod-subpath-test-configmap-fhmm 08/24/23 12:41:45.095
+STEP: Creating a pod to test atomic-volume-subpath 08/24/23 12:41:45.095
+Aug 24 12:41:45.120: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-fhmm" in namespace "subpath-874" to be "Succeeded or Failed"
+Aug 24 12:41:45.125: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Pending", Reason="", readiness=false. Elapsed: 5.2606ms
+Aug 24 12:41:47.132: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 2.012737273s
+Aug 24 12:41:49.134: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 4.013981102s
+Aug 24 12:41:51.137: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 6.017226553s
+Aug 24 12:41:53.136: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 8.01629814s
+Aug 24 12:41:55.132: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 10.012414589s
+Aug 24 12:41:57.133: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 12.013582052s
+Aug 24 12:41:59.134: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 14.013899791s
+Aug 24 12:42:01.133: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 16.013755897s
+Aug 24 12:42:03.143: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 18.023488648s
+Aug 24 12:42:05.134: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 20.014015763s
+Aug 24 12:42:07.135: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=false. Elapsed: 22.015438786s
+Aug 24 12:42:09.132: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.012596731s
+STEP: Saw pod success 08/24/23 12:42:09.132
+Aug 24 12:42:09.133: INFO: Pod "pod-subpath-test-configmap-fhmm" satisfied condition "Succeeded or Failed"
+Aug 24 12:42:09.138: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-configmap-fhmm container test-container-subpath-configmap-fhmm:
+STEP: delete the pod 08/24/23 12:42:09.154
+Aug 24 12:42:09.180: INFO: Waiting for pod pod-subpath-test-configmap-fhmm to disappear
+Aug 24 12:42:09.186: INFO: Pod pod-subpath-test-configmap-fhmm no longer exists
+STEP: Deleting pod pod-subpath-test-configmap-fhmm 08/24/23 12:42:09.186
+Aug 24 12:42:09.187: INFO: Deleting pod "pod-subpath-test-configmap-fhmm" in namespace "subpath-874"
+[AfterEach] [sig-storage] Subpath
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:35:08.832: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-storage] EmptyDir volumes
+Aug 24 12:42:09.198: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-storage] Subpath
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-storage] EmptyDir volumes
+[DeferCleanup (Each)] [sig-storage] Subpath
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-storage] EmptyDir volumes
+[DeferCleanup (Each)] [sig-storage] Subpath
 tear down framework | framework.go:193
-STEP: Destroying namespace "emptydir-8832" for this suite. 07/29/23 16:35:08.84
+STEP: Destroying namespace "subpath-874" for this suite. 08/24/23 12:42:09.208
------------------------------
-• [4.170 seconds]
-[sig-storage] EmptyDir volumes
-test/e2e/common/storage/framework.go:23
- should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/empty_dir.go:167
+• [SLOW TEST] [24.201 seconds]
+[sig-storage] Subpath
+test/e2e/storage/utils/framework.go:23
+ Atomic writer volumes
+ test/e2e/storage/subpath.go:36
+ should support subpaths with configmap pod [Conformance]
+ test/e2e/storage/subpath.go:70

Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-storage] EmptyDir volumes
+ [BeforeEach] [sig-storage] Subpath
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:35:04.682
- Jul 29 16:35:04.682: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename emptydir 07/29/23 16:35:04.685
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:04.715
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:04.72
- [BeforeEach] [sig-storage] EmptyDir volumes
+ STEP: Creating a kubernetes client 08/24/23 12:41:45.019
+ Aug 24 12:41:45.019: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename subpath 08/24/23 12:41:45.023
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:41:45.059
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:41:45.065
+ [BeforeEach] [sig-storage] Subpath
 test/e2e/framework/metrics/init/init.go:31
- [It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
- test/e2e/common/storage/empty_dir.go:167
- STEP: Creating a pod to test emptydir 0644 on node default medium 07/29/23 16:35:04.726
- Jul 29 16:35:04.746: INFO: Waiting up to 5m0s for pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb" in namespace "emptydir-8832" to be "Succeeded or Failed"
- Jul 29 16:35:04.753: INFO: Pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb": Phase="Pending", Reason="", readiness=false. Elapsed: 6.32529ms
- Jul 29 16:35:06.760: INFO: Pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013911248s
- Jul 29 16:35:08.762: INFO: Pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015692955s
- STEP: Saw pod success 07/29/23 16:35:08.762
- Jul 29 16:35:08.763: INFO: Pod "pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb" satisfied condition "Succeeded or Failed"
- Jul 29 16:35:08.768: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb container test-container:
- STEP: delete the pod 07/29/23 16:35:08.8
- Jul 29 16:35:08.825: INFO: Waiting for pod pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb to disappear
- Jul 29 16:35:08.831: INFO: Pod pod-8599c011-1ee4-4d8a-bfff-7fecb86d86fb no longer exists
- [AfterEach] [sig-storage] EmptyDir volumes
+ [BeforeEach] Atomic writer volumes
+ test/e2e/storage/subpath.go:40
+ STEP: Setting up data 08/24/23 12:41:45.072
+ [It] should support subpaths with configmap pod [Conformance]
+ test/e2e/storage/subpath.go:70
+ STEP: Creating pod pod-subpath-test-configmap-fhmm 08/24/23 12:41:45.095
+ STEP: Creating a pod to test atomic-volume-subpath 08/24/23 12:41:45.095
+ Aug 24 12:41:45.120: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-fhmm" in namespace "subpath-874" to be "Succeeded or Failed"
+ Aug 24 12:41:45.125: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Pending", Reason="", readiness=false. Elapsed: 5.2606ms
+ Aug 24 12:41:47.132: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 2.012737273s
+ Aug 24 12:41:49.134: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 4.013981102s
+ Aug 24 12:41:51.137: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 6.017226553s
+ Aug 24 12:41:53.136: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 8.01629814s
+ Aug 24 12:41:55.132: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 10.012414589s
+ Aug 24 12:41:57.133: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 12.013582052s
+ Aug 24 12:41:59.134: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 14.013899791s
+ Aug 24 12:42:01.133: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 16.013755897s
+ Aug 24 12:42:03.143: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 18.023488648s
+ Aug 24 12:42:05.134: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=true. Elapsed: 20.014015763s
+ Aug 24 12:42:07.135: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Running", Reason="", readiness=false. Elapsed: 22.015438786s
+ Aug 24 12:42:09.132: INFO: Pod "pod-subpath-test-configmap-fhmm": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.012596731s
+ STEP: Saw pod success 08/24/23 12:42:09.132
+ Aug 24 12:42:09.133: INFO: Pod "pod-subpath-test-configmap-fhmm" satisfied condition "Succeeded or Failed"
+ Aug 24 12:42:09.138: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-subpath-test-configmap-fhmm container test-container-subpath-configmap-fhmm:
+ STEP: delete the pod 08/24/23 12:42:09.154
+ Aug 24 12:42:09.180: INFO: Waiting for pod pod-subpath-test-configmap-fhmm to disappear
+ Aug 24 12:42:09.186: INFO: Pod pod-subpath-test-configmap-fhmm no longer exists
+ STEP: Deleting pod pod-subpath-test-configmap-fhmm 08/24/23 12:42:09.186
+ Aug 24 12:42:09.187: INFO: Deleting pod "pod-subpath-test-configmap-fhmm" in namespace "subpath-874"
+ [AfterEach] [sig-storage] Subpath
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:35:08.832: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-storage] EmptyDir volumes
+ Aug 24 12:42:09.198: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] Subpath
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-storage] EmptyDir volumes
+ [DeferCleanup (Each)] [sig-storage] Subpath
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-storage] EmptyDir volumes
+ [DeferCleanup (Each)] [sig-storage] Subpath
 tear down framework | framework.go:193
- STEP: Destroying namespace "emptydir-8832" for this suite. 07/29/23 16:35:08.84
+ STEP: Destroying namespace "subpath-874" for this suite. 08/24/23 12:42:09.208
<< End Captured GinkgoWriter Output
------------------------------
-SSSSSSSS
+S
------------------------------
-[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook
- should execute poststart exec hook properly [NodeConformance] [Conformance]
- test/e2e/common/node/lifecycle_hook.go:134
-[BeforeEach] [sig-node] Container Lifecycle Hook
+[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+ custom resource defaulting for requests and from storage works [Conformance]
+ test/e2e/apimachinery/custom_resource_definition.go:269
+[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:35:08.853
-Jul 29 16:35:08.854: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename container-lifecycle-hook 07/29/23 16:35:08.857
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:08.887
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:08.892
-[BeforeEach] [sig-node] Container Lifecycle Hook
+STEP: Creating a kubernetes client 08/24/23 12:42:09.223
+Aug 24 12:42:09.223: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 12:42:09.226
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:09.26
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:09.267
+[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] when create a pod with lifecycle hook
- test/e2e/common/node/lifecycle_hook.go:77
-STEP: create the container to handle the HTTPGet hook request. 07/29/23 16:35:08.904
-Jul 29 16:35:08.919: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-1068" to be "running and ready"
-Jul 29 16:35:08.925: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 5.378519ms
-Jul 29 16:35:08.925: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true)
-Jul 29 16:35:10.933: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 2.013586584s
-Jul 29 16:35:10.933: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true)
-Jul 29 16:35:10.933: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready"
-[It] should execute poststart exec hook properly [NodeConformance] [Conformance]
- test/e2e/common/node/lifecycle_hook.go:134
-STEP: create the pod with lifecycle hook 07/29/23 16:35:10.951
-Jul 29 16:35:10.962: INFO: Waiting up to 5m0s for pod "pod-with-poststart-exec-hook" in namespace "container-lifecycle-hook-1068" to be "running and ready"
-Jul 29 16:35:10.972: INFO: Pod "pod-with-poststart-exec-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 10.029199ms
-Jul 29 16:35:10.972: INFO: The phase of Pod pod-with-poststart-exec-hook is Pending, waiting for it to be Running (with Ready = true)
-Jul 29 16:35:12.982: INFO: Pod "pod-with-poststart-exec-hook": Phase="Running", Reason="", readiness=true. Elapsed: 2.019990312s
-Jul 29 16:35:12.982: INFO: The phase of Pod pod-with-poststart-exec-hook is Running (Ready = true)
-Jul 29 16:35:12.982: INFO: Pod "pod-with-poststart-exec-hook" satisfied condition "running and ready"
-STEP: check poststart hook 07/29/23 16:35:12.99
-STEP: delete the pod with lifecycle hook 07/29/23 16:35:13.035
-Jul 29 16:35:13.053: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
-Jul 29 16:35:13.060: INFO: Pod pod-with-poststart-exec-hook still exists
-Jul 29 16:35:15.062: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
-Jul 29 16:35:15.070: INFO: Pod pod-with-poststart-exec-hook still exists
-Jul 29 16:35:17.063: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
-Jul 29 16:35:17.074: INFO: Pod pod-with-poststart-exec-hook no longer exists
-[AfterEach] [sig-node] Container Lifecycle Hook
+[It] custom resource defaulting for requests and from storage works [Conformance]
+ test/e2e/apimachinery/custom_resource_definition.go:269
+Aug 24 12:42:09.273: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:35:17.074: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook
+Aug 24 12:42:12.811: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook
+[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook
+[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 tear down framework | framework.go:193
-STEP: Destroying namespace "container-lifecycle-hook-1068" for this suite. 07/29/23 16:35:17.084
+STEP: Destroying namespace "custom-resource-definition-3153" for this suite. 08/24/23 12:42:12.82
------------------------------
-• [SLOW TEST] [8.250 seconds]
-[sig-node] Container Lifecycle Hook
-test/e2e/common/node/framework.go:23
- when create a pod with lifecycle hook
- test/e2e/common/node/lifecycle_hook.go:46
- should execute poststart exec hook properly [NodeConformance] [Conformance]
- test/e2e/common/node/lifecycle_hook.go:134
+• [3.615 seconds]
+[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+test/e2e/apimachinery/framework.go:23
+ custom resource defaulting for requests and from storage works [Conformance]
+ test/e2e/apimachinery/custom_resource_definition.go:269

Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-node] Container Lifecycle Hook
+ [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:35:08.853
- Jul 29 16:35:08.854: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename container-lifecycle-hook 07/29/23 16:35:08.857
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:08.887
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:08.892
- [BeforeEach] [sig-node] Container Lifecycle Hook
+ STEP: Creating a kubernetes client 08/24/23 12:42:09.223
+ Aug 24 12:42:09.223: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename custom-resource-definition 08/24/23 12:42:09.226
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:09.26
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:09.267
+ [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] when create a pod with lifecycle hook
- test/e2e/common/node/lifecycle_hook.go:77
- STEP: create the container to handle the HTTPGet hook request. 07/29/23 16:35:08.904
- Jul 29 16:35:08.919: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-1068" to be "running and ready"
- Jul 29 16:35:08.925: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 5.378519ms
- Jul 29 16:35:08.925: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true)
- Jul 29 16:35:10.933: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 2.013586584s
- Jul 29 16:35:10.933: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true)
- Jul 29 16:35:10.933: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready"
- [It] should execute poststart exec hook properly [NodeConformance] [Conformance]
- test/e2e/common/node/lifecycle_hook.go:134
- STEP: create the pod with lifecycle hook 07/29/23 16:35:10.951
- Jul 29 16:35:10.962: INFO: Waiting up to 5m0s for pod "pod-with-poststart-exec-hook" in namespace "container-lifecycle-hook-1068" to be "running and ready"
- Jul 29 16:35:10.972: INFO: Pod "pod-with-poststart-exec-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 10.029199ms
- Jul 29 16:35:10.972: INFO: The phase of Pod pod-with-poststart-exec-hook is Pending, waiting for it to be Running (with Ready = true)
- Jul 29 16:35:12.982: INFO: Pod "pod-with-poststart-exec-hook": Phase="Running", Reason="", readiness=true. Elapsed: 2.019990312s
- Jul 29 16:35:12.982: INFO: The phase of Pod pod-with-poststart-exec-hook is Running (Ready = true)
- Jul 29 16:35:12.982: INFO: Pod "pod-with-poststart-exec-hook" satisfied condition "running and ready"
- STEP: check poststart hook 07/29/23 16:35:12.99
- STEP: delete the pod with lifecycle hook 07/29/23 16:35:13.035
- Jul 29 16:35:13.053: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
- Jul 29 16:35:13.060: INFO: Pod pod-with-poststart-exec-hook still exists
- Jul 29 16:35:15.062: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
- Jul 29 16:35:15.070: INFO: Pod pod-with-poststart-exec-hook still exists
- Jul 29 16:35:17.063: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
- Jul 29 16:35:17.074: INFO: Pod pod-with-poststart-exec-hook no longer exists
- [AfterEach] [sig-node] Container Lifecycle Hook
+ [It] custom resource defaulting for requests and from storage works [Conformance]
+ test/e2e/apimachinery/custom_resource_definition.go:269
+ Aug 24 12:42:09.273: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+ test/e2e/framework/node/init/init.go:32
+ Aug 24 12:42:12.811: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+ test/e2e/framework/metrics/init/init.go:33
+ [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+ dump namespaces | framework.go:196
+ [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
+ tear down framework | framework.go:193
+ STEP: Destroying namespace "custom-resource-definition-3153" for this suite. 08/24/23 12:42:12.82
<< End Captured GinkgoWriter Output
------------------------------
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] Namespaces [Serial]
 should apply changes to a namespace status [Conformance]
 test/e2e/apimachinery/namespace.go:299
[BeforeEach] [sig-api-machinery] Namespaces [Serial]
 set up framework | framework.go:178
STEP: Creating a kubernetes client 08/24/23 12:42:12.851
Aug 24 12:42:12.851: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
STEP: Building a namespace api object, basename namespaces 08/24/23 12:42:12.853
STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:12.893
STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:12.898
[BeforeEach] [sig-api-machinery] Namespaces [Serial]
 test/e2e/framework/metrics/init/init.go:31
[It] should apply changes to a namespace status [Conformance]
 test/e2e/apimachinery/namespace.go:299
STEP: Read namespace status 08/24/23 12:42:12.904
Aug 24 12:42:12.910: INFO: Status: v1.NamespaceStatus{Phase:"Active", Conditions:[]v1.NamespaceCondition(nil)}
STEP: Patch namespace status 08/24/23 12:42:12.911
Aug 24 12:42:12.923: INFO: Status.Condition: v1.NamespaceCondition{Type:"StatusPatch", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Patched by an e2e test"}
STEP: Update namespace status 08/24/23 12:42:12.923
Aug 24 12:42:12.941: INFO: Status.Condition: v1.NamespaceCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Updated by an e2e test"}
[AfterEach] [sig-api-machinery] Namespaces [Serial]
 test/e2e/framework/node/init/init.go:32
Aug 24 12:42:12.941: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
 test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
 dump namespaces | framework.go:196
[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial]
 tear down framework | framework.go:193
STEP: Destroying namespace "namespaces-4061" for this suite.
08/24/23 12:42:12.949 +------------------------------ +• [0.112 seconds] +[sig-api-machinery] Namespaces [Serial] +test/e2e/apimachinery/framework.go:23 + should apply changes to a namespace status [Conformance] + test/e2e/apimachinery/namespace.go:299 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-api-machinery] Namespaces [Serial] + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:42:12.851 + Aug 24 12:42:12.851: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename namespaces 08/24/23 12:42:12.853 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:12.893 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:12.898 + [BeforeEach] [sig-api-machinery] Namespaces [Serial] + test/e2e/framework/metrics/init/init.go:31 + [It] should apply changes to a namespace status [Conformance] + test/e2e/apimachinery/namespace.go:299 + STEP: Read namespace status 08/24/23 12:42:12.904 + Aug 24 12:42:12.910: INFO: Status: v1.NamespaceStatus{Phase:"Active", Conditions:[]v1.NamespaceCondition(nil)} + STEP: Patch namespace status 08/24/23 12:42:12.911 + Aug 24 12:42:12.923: INFO: Status.Condition: v1.NamespaceCondition{Type:"StatusPatch", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Patched by an e2e test"} + STEP: Update namespace status 08/24/23 12:42:12.923 + Aug 24 12:42:12.941: INFO: Status.Condition: v1.NamespaceCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Updated by an e2e test"} + [AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 16:35:17.074: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + Aug 24 12:42:12.941: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "container-lifecycle-hook-1068" for this suite. 07/29/23 16:35:17.084 + STEP: Destroying namespace "namespaces-4061" for this suite. 
08/24/23 12:42:12.949 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:187 -[BeforeEach] [sig-storage] EmptyDir volumes +[sig-storage] Projected downwardAPI + should update annotations on modification [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:162 +[BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:35:17.105 -Jul 29 16:35:17.105: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 16:35:17.109 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:17.133 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:17.141 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 12:42:12.971 +Aug 24 12:42:12.971: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:42:12.974 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:13.014 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:13.018 +[BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 -[It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:187 -STEP: Creating a pod to test emptydir 0777 on node default medium 07/29/23 16:35:17.151 -Jul 29 16:35:17.169: INFO: Waiting up to 5m0s for pod "pod-247ab53b-172b-405e-b8c8-860570711235" in namespace "emptydir-4062" to be "Succeeded or Failed" -Jul 29 16:35:17.175: INFO: Pod "pod-247ab53b-172b-405e-b8c8-860570711235": Phase="Pending", Reason="", readiness=false. Elapsed: 6.698922ms -Jul 29 16:35:19.185: INFO: Pod "pod-247ab53b-172b-405e-b8c8-860570711235": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015843199s -Jul 29 16:35:21.185: INFO: Pod "pod-247ab53b-172b-405e-b8c8-860570711235": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016324482s -STEP: Saw pod success 07/29/23 16:35:21.185 -Jul 29 16:35:21.186: INFO: Pod "pod-247ab53b-172b-405e-b8c8-860570711235" satisfied condition "Succeeded or Failed" -Jul 29 16:35:21.194: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-247ab53b-172b-405e-b8c8-860570711235 container test-container: -STEP: delete the pod 07/29/23 16:35:21.209 -Jul 29 16:35:21.234: INFO: Waiting for pod pod-247ab53b-172b-405e-b8c8-860570711235 to disappear -Jul 29 16:35:21.242: INFO: Pod pod-247ab53b-172b-405e-b8c8-860570711235 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 +[It] should update annotations on modification [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:162 +STEP: Creating the pod 08/24/23 12:42:13.023 +Aug 24 12:42:13.037: INFO: Waiting up to 5m0s for pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4" in namespace "projected-6134" to be "running and ready" +Aug 24 12:42:13.047: INFO: Pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4": Phase="Pending", Reason="", readiness=false. 
Elapsed: 9.468582ms +Aug 24 12:42:13.047: INFO: The phase of Pod annotationupdate39783633-939d-4294-837f-0ecda8ac45a4 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:42:15.056: INFO: Pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4": Phase="Running", Reason="", readiness=true. Elapsed: 2.018654512s +Aug 24 12:42:15.056: INFO: The phase of Pod annotationupdate39783633-939d-4294-837f-0ecda8ac45a4 is Running (Ready = true) +Aug 24 12:42:15.056: INFO: Pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4" satisfied condition "running and ready" +Aug 24 12:42:15.609: INFO: Successfully updated pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4" +[AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 -Jul 29 16:35:21.242: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +Aug 24 12:42:19.655: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-4062" for this suite. 07/29/23 16:35:21.251 +STEP: Destroying namespace "projected-6134" for this suite. 08/24/23 12:42:19.668 ------------------------------ -• [4.166 seconds] -[sig-storage] EmptyDir volumes +• [SLOW TEST] [6.712 seconds] +[sig-storage] Projected downwardAPI test/e2e/common/storage/framework.go:23 - should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:187 + should update annotations on modification [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:162 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:35:17.105 - Jul 29 16:35:17.105: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 16:35:17.109 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:17.133 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:17.141 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 12:42:12.971 + Aug 24 12:42:12.971: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:42:12.974 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:13.014 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:13.018 + [BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 - [It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:187 - STEP: Creating a pod to test emptydir 0777 on node default medium 07/29/23 16:35:17.151 - Jul 29 16:35:17.169: INFO: Waiting up to 5m0s for pod "pod-247ab53b-172b-405e-b8c8-860570711235" in namespace "emptydir-4062" to be "Succeeded or Failed" - Jul 29 16:35:17.175: INFO: Pod 
"pod-247ab53b-172b-405e-b8c8-860570711235": Phase="Pending", Reason="", readiness=false. Elapsed: 6.698922ms - Jul 29 16:35:19.185: INFO: Pod "pod-247ab53b-172b-405e-b8c8-860570711235": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015843199s - Jul 29 16:35:21.185: INFO: Pod "pod-247ab53b-172b-405e-b8c8-860570711235": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016324482s - STEP: Saw pod success 07/29/23 16:35:21.185 - Jul 29 16:35:21.186: INFO: Pod "pod-247ab53b-172b-405e-b8c8-860570711235" satisfied condition "Succeeded or Failed" - Jul 29 16:35:21.194: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-247ab53b-172b-405e-b8c8-860570711235 container test-container: - STEP: delete the pod 07/29/23 16:35:21.209 - Jul 29 16:35:21.234: INFO: Waiting for pod pod-247ab53b-172b-405e-b8c8-860570711235 to disappear - Jul 29 16:35:21.242: INFO: Pod pod-247ab53b-172b-405e-b8c8-860570711235 no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 + [It] should update annotations on modification [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:162 + STEP: Creating the pod 08/24/23 12:42:13.023 + Aug 24 12:42:13.037: INFO: Waiting up to 5m0s for pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4" in namespace "projected-6134" to be "running and ready" + Aug 24 12:42:13.047: INFO: Pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4": Phase="Pending", Reason="", readiness=false. Elapsed: 9.468582ms + Aug 24 12:42:13.047: INFO: The phase of Pod annotationupdate39783633-939d-4294-837f-0ecda8ac45a4 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:42:15.056: INFO: Pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4": Phase="Running", Reason="", readiness=true. Elapsed: 2.018654512s + Aug 24 12:42:15.056: INFO: The phase of Pod annotationupdate39783633-939d-4294-837f-0ecda8ac45a4 is Running (Ready = true) + Aug 24 12:42:15.056: INFO: Pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4" satisfied condition "running and ready" + Aug 24 12:42:15.609: INFO: Successfully updated pod "annotationupdate39783633-939d-4294-837f-0ecda8ac45a4" + [AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 - Jul 29 16:35:21.242: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 12:42:19.655: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-4062" for this suite. 07/29/23 16:35:21.251 + STEP: Destroying namespace "projected-6134" for this suite. 
08/24/23 12:42:19.668 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Pods - should patch a pod status [Conformance] - test/e2e/common/node/pods.go:1083 -[BeforeEach] [sig-node] Pods +[sig-network] DNS + should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] + test/e2e/network/dns.go:193 +[BeforeEach] [sig-network] DNS set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:35:21.271 -Jul 29 16:35:21.271: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 16:35:21.275 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:21.305 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:21.313 -[BeforeEach] [sig-node] Pods +STEP: Creating a kubernetes client 08/24/23 12:42:19.693 +Aug 24 12:42:19.693: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename dns 08/24/23 12:42:19.696 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:19.738 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:19.743 +[BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should patch a pod status [Conformance] - test/e2e/common/node/pods.go:1083 -STEP: Create a pod 07/29/23 16:35:21.32 -Jul 29 16:35:21.343: INFO: Waiting up to 5m0s for pod "pod-4dp9n" in namespace "pods-1870" to be "running" -Jul 29 16:35:21.366: INFO: Pod "pod-4dp9n": Phase="Pending", Reason="", readiness=false. Elapsed: 22.623815ms -Jul 29 16:35:23.374: INFO: Pod "pod-4dp9n": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.031144578s -Jul 29 16:35:23.374: INFO: Pod "pod-4dp9n" satisfied condition "running" -STEP: patching /status 07/29/23 16:35:23.374 -Jul 29 16:35:23.391: INFO: Status Message: "Patched by e2e test" and Reason: "E2E" -[AfterEach] [sig-node] Pods +[It] should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] + test/e2e/network/dns.go:193 +STEP: Creating a test headless service 08/24/23 12:42:19.754 +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-428 A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-428;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-428 A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-428;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-428.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-428.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-428.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-428.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-428.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-428.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-428.svc;check="$$(dig +notcp +noall +answer +search 42.15.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.15.42_udp@PTR;check="$$(dig +tcp +noall +answer +search 42.15.233.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.233.15.42_tcp@PTR;sleep 1; done + 08/24/23 12:42:19.802 +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-428 A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-428;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-428 A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-428;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-428.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-428.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-428.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-428.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-428.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-428.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-428.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-428.svc;check="$$(dig +notcp +noall +answer +search 42.15.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.15.42_udp@PTR;check="$$(dig +tcp +noall +answer +search 42.15.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.15.42_tcp@PTR;sleep 1; done + 08/24/23 12:42:19.802 +STEP: creating a pod to probe DNS 08/24/23 12:42:19.803 +STEP: submitting the pod to kubernetes 08/24/23 12:42:19.804 +Aug 24 12:42:19.839: INFO: Waiting up to 15m0s for pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1" in namespace "dns-428" to be "running" +Aug 24 12:42:19.848: INFO: Pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1": Phase="Pending", Reason="", readiness=false. Elapsed: 9.273022ms +Aug 24 12:42:21.858: INFO: Pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018682047s +Aug 24 12:42:23.857: INFO: Pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.018037921s +Aug 24 12:42:23.857: INFO: Pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1" satisfied condition "running" +STEP: retrieving the pod 08/24/23 12:42:23.857 +STEP: looking for the results for each expected name from probers 08/24/23 12:42:23.864 +Aug 24 12:42:23.873: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.879: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.887: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.893: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.900: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.905: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.910: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.916: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.944: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.950: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.955: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.960: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.965: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.970: INFO: Unable to 
read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.974: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:23.980: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:24.003: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + +Aug 24 12:42:29.014: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.026: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.033: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.045: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.054: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.060: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.074: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.082: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.116: INFO: Unable to read 
jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.124: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.130: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.137: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.150: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.155: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.161: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.170: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:29.209: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + +Aug 24 12:42:34.017: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.025: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.031: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.037: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod 
dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.044: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.051: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.058: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.066: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.111: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.122: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.178: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.188: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.194: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.202: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.210: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.217: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:34.245: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc 
wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + +Aug 24 12:42:39.011: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.017: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.056: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.063: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.075: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.086: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.091: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.096: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.124: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.130: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.137: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.144: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.151: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod 
dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.158: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.164: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.171: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:39.197: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + +Aug 24 12:42:44.016: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.024: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.031: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.038: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.048: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.054: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.061: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.072: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod 
dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.112: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.119: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.128: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.140: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.152: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.160: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.167: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.174: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:44.208: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + +Aug 24 12:42:49.013: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.019: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.024: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not 
find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.034: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.043: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.054: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.062: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.071: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.107: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.112: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.118: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.124: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.129: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.135: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.144: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.150: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) +Aug 24 12:42:49.176: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service 
wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + +Aug 24 12:42:54.235: INFO: DNS probes using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 succeeded + +STEP: deleting the pod 08/24/23 12:42:54.235 +STEP: deleting the test service 08/24/23 12:42:54.302 +STEP: deleting the test headless service 08/24/23 12:42:54.348 +[AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 -Jul 29 16:35:23.391: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods +Aug 24 12:42:54.381: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 -STEP: Destroying namespace "pods-1870" for this suite. 07/29/23 16:35:23.401 +STEP: Destroying namespace "dns-428" for this suite. 08/24/23 12:42:54.403 ------------------------------ -• [2.142 seconds] -[sig-node] Pods -test/e2e/common/node/framework.go:23 - should patch a pod status [Conformance] - test/e2e/common/node/pods.go:1083 +• [SLOW TEST] [34.730 seconds] +[sig-network] DNS +test/e2e/network/common/framework.go:23 + should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] + test/e2e/network/dns.go:193 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods + [BeforeEach] [sig-network] DNS set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:35:21.271 - Jul 29 16:35:21.271: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 16:35:21.275 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:21.305 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:21.313 - [BeforeEach] [sig-node] Pods + STEP: Creating a kubernetes client 08/24/23 12:42:19.693 + Aug 24 12:42:19.693: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename dns 08/24/23 12:42:19.696 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:19.738 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:19.743 + [BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should patch a pod status [Conformance] - test/e2e/common/node/pods.go:1083 - STEP: Create a pod 07/29/23 16:35:21.32 - Jul 29 16:35:21.343: INFO: Waiting up to 5m0s for pod "pod-4dp9n" in namespace "pods-1870" to be "running" - Jul 29 16:35:21.366: INFO: Pod "pod-4dp9n": Phase="Pending", Reason="", readiness=false. Elapsed: 22.623815ms - Jul 29 16:35:23.374: INFO: Pod "pod-4dp9n": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.031144578s - Jul 29 16:35:23.374: INFO: Pod "pod-4dp9n" satisfied condition "running" - STEP: patching /status 07/29/23 16:35:23.374 - Jul 29 16:35:23.391: INFO: Status Message: "Patched by e2e test" and Reason: "E2E" - [AfterEach] [sig-node] Pods + [It] should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] + test/e2e/network/dns.go:193 + STEP: Creating a test headless service 08/24/23 12:42:19.754 + STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-428 A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-428;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-428 A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-428;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-428.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-428.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-428.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-428.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-428.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-428.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-428.svc;check="$$(dig +notcp +noall +answer +search 42.15.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.15.42_udp@PTR;check="$$(dig +tcp +noall +answer +search 42.15.233.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.233.15.42_tcp@PTR;sleep 1; done + 08/24/23 12:42:19.802 + STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-428 A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-428;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-428 A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-428;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-428.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-428.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-428.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-428.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-428.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-428.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-428.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-428.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-428.svc;check="$$(dig +notcp +noall +answer +search 42.15.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.15.42_udp@PTR;check="$$(dig +tcp +noall +answer +search 42.15.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.15.42_tcp@PTR;sleep 1; done + 08/24/23 12:42:19.802 + STEP: creating a pod to probe DNS 08/24/23 12:42:19.803 + STEP: submitting the pod to kubernetes 08/24/23 12:42:19.804 + Aug 24 12:42:19.839: INFO: Waiting up to 15m0s for pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1" in namespace "dns-428" to be "running" + Aug 24 12:42:19.848: INFO: Pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1": Phase="Pending", Reason="", readiness=false. Elapsed: 9.273022ms + Aug 24 12:42:21.858: INFO: Pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018682047s + Aug 24 12:42:23.857: INFO: Pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.018037921s + Aug 24 12:42:23.857: INFO: Pod "dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1" satisfied condition "running" + STEP: retrieving the pod 08/24/23 12:42:23.857 + STEP: looking for the results for each expected name from probers 08/24/23 12:42:23.864 + Aug 24 12:42:23.873: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.879: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.887: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.893: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.900: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.905: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.910: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.916: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.944: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.950: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.955: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.960: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.965: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.970: 
INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.974: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:23.980: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:24.003: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + + Aug 24 12:42:29.014: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.026: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.033: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.045: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.054: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.060: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.074: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.082: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.116: INFO: Unable to 
read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.124: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.130: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.137: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.150: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.155: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.161: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.170: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:29.209: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + + Aug 24 12:42:34.017: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.025: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.031: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.037: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod 
dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.044: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.051: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.058: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.066: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.111: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.122: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.178: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.188: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.194: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.202: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.210: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.217: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:34.245: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc 
wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + + Aug 24 12:42:39.011: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.017: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.056: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.063: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.075: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.086: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.091: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.096: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.124: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.130: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.137: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.144: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.151: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod 
dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.158: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.164: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.171: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:39.197: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + + Aug 24 12:42:44.016: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.024: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.031: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.038: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.048: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.054: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.061: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.072: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod 
dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.112: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.119: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.128: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.140: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.152: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.160: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.167: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.174: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:44.208: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + + Aug 24 12:42:49.013: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.019: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.024: INFO: Unable to read wheezy_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server 
could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.034: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.043: INFO: Unable to read wheezy_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.054: INFO: Unable to read wheezy_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.062: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.071: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.107: INFO: Unable to read jessie_udp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.112: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.118: INFO: Unable to read jessie_udp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.124: INFO: Unable to read jessie_tcp@dns-test-service.dns-428 from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.129: INFO: Unable to read jessie_udp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.135: INFO: Unable to read jessie_tcp@dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.144: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.150: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-428.svc from pod dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1: the server could not find the requested resource (get pods dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1) + Aug 24 12:42:49.176: INFO: Lookups using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 failed for: [wheezy_udp@dns-test-service 
wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-428 wheezy_tcp@dns-test-service.dns-428 wheezy_udp@dns-test-service.dns-428.svc wheezy_tcp@dns-test-service.dns-428.svc wheezy_udp@_http._tcp.dns-test-service.dns-428.svc wheezy_tcp@_http._tcp.dns-test-service.dns-428.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-428 jessie_tcp@dns-test-service.dns-428 jessie_udp@dns-test-service.dns-428.svc jessie_tcp@dns-test-service.dns-428.svc jessie_udp@_http._tcp.dns-test-service.dns-428.svc jessie_tcp@_http._tcp.dns-test-service.dns-428.svc] + + Aug 24 12:42:54.235: INFO: DNS probes using dns-428/dns-test-e0af4161-a30e-40ee-8d39-f9fda4abd5e1 succeeded + + STEP: deleting the pod 08/24/23 12:42:54.235 + STEP: deleting the test service 08/24/23 12:42:54.302 + STEP: deleting the test headless service 08/24/23 12:42:54.348 + [AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 - Jul 29 16:35:23.391: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods + Aug 24 12:42:54.381: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 - STEP: Destroying namespace "pods-1870" for this suite. 07/29/23 16:35:23.401 + STEP: Destroying namespace "dns-428" for this suite. 08/24/23 12:42:54.403 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Security Context - should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] - test/e2e/node/security_context.go:129 -[BeforeEach] [sig-node] Security Context +[sig-scheduling] SchedulerPredicates [Serial] + validates resource limits of pods that are allowed to run [Conformance] + test/e2e/scheduling/predicates.go:331 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:35:23.42 -Jul 29 16:35:23.420: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename security-context 07/29/23 16:35:23.422 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:23.449 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:23.458 -[BeforeEach] [sig-node] Security Context +STEP: Creating a kubernetes client 08/24/23 12:42:54.437 +Aug 24 12:42:54.437: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename sched-pred 08/24/23 12:42:54.444 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:54.479 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:54.488 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] - test/e2e/node/security_context.go:129 -STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser 07/29/23 16:35:23.464 -Jul 29 16:35:23.478: INFO: Waiting up to 5m0s for pod 
"security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39" in namespace "security-context-8668" to be "Succeeded or Failed" -Jul 29 16:35:23.488: INFO: Pod "security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39": Phase="Pending", Reason="", readiness=false. Elapsed: 9.606733ms -Jul 29 16:35:25.496: INFO: Pod "security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017775212s -Jul 29 16:35:27.498: INFO: Pod "security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019878815s -STEP: Saw pod success 07/29/23 16:35:27.498 -Jul 29 16:35:27.499: INFO: Pod "security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39" satisfied condition "Succeeded or Failed" -Jul 29 16:35:27.509: INFO: Trying to get logs from node wetuj3nuajog-3 pod security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39 container test-container: -STEP: delete the pod 07/29/23 16:35:27.525 -Jul 29 16:35:27.573: INFO: Waiting for pod security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39 to disappear -Jul 29 16:35:27.580: INFO: Pod security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39 no longer exists -[AfterEach] [sig-node] Security Context +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/scheduling/predicates.go:97 +Aug 24 12:42:54.496: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Aug 24 12:42:54.524: INFO: Waiting for terminating namespaces to be deleted... +Aug 24 12:42:54.532: INFO: +Logging pods the apiserver thinks is on node pe9deep4seen-1 before test +Aug 24 12:42:54.565: INFO: cilium-node-init-wqpdx from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.565: INFO: Container node-init ready: true, restart count 0 +Aug 24 12:42:54.565: INFO: cilium-wpzgb from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.565: INFO: Container cilium-agent ready: true, restart count 0 +Aug 24 12:42:54.565: INFO: coredns-787d4945fb-8jnm5 from kube-system started at 2023-08-24 11:24:04 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.565: INFO: Container coredns ready: true, restart count 0 +Aug 24 12:42:54.565: INFO: coredns-787d4945fb-d76z6 from kube-system started at 2023-08-24 11:24:07 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.565: INFO: Container coredns ready: true, restart count 0 +Aug 24 12:42:54.565: INFO: kube-addon-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.565: INFO: Container kube-addon-manager ready: true, restart count 0 +Aug 24 12:42:54.566: INFO: kube-apiserver-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.566: INFO: Container kube-apiserver ready: true, restart count 0 +Aug 24 12:42:54.566: INFO: kube-controller-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.566: INFO: Container kube-controller-manager ready: true, restart count 0 +Aug 24 12:42:54.566: INFO: kube-proxy-nr5bs from kube-system started at 2023-08-24 11:21:24 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.566: INFO: Container kube-proxy ready: true, restart count 0 +Aug 24 12:42:54.566: INFO: kube-scheduler-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.566: INFO: 
Container kube-scheduler ready: true, restart count 0 +Aug 24 12:42:54.566: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) +Aug 24 12:42:54.566: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 12:42:54.566: INFO: Container systemd-logs ready: true, restart count 0 +Aug 24 12:42:54.566: INFO: +Logging pods the apiserver thinks is on node pe9deep4seen-2 before test +Aug 24 12:42:54.586: INFO: cilium-node-init-95cbk from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.587: INFO: Container node-init ready: true, restart count 0 +Aug 24 12:42:54.587: INFO: cilium-operator-75f7897945-8qqz2 from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.587: INFO: Container cilium-operator ready: true, restart count 0 +Aug 24 12:42:54.587: INFO: cilium-rcknz from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.587: INFO: Container cilium-agent ready: true, restart count 0 +Aug 24 12:42:54.587: INFO: kube-addon-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:37 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.587: INFO: Container kube-addon-manager ready: true, restart count 0 +Aug 24 12:42:54.587: INFO: kube-apiserver-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.587: INFO: Container kube-apiserver ready: true, restart count 0 +Aug 24 12:42:54.588: INFO: kube-controller-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.588: INFO: Container kube-controller-manager ready: true, restart count 0 +Aug 24 12:42:54.588: INFO: kube-proxy-lm2dm from kube-system started at 2023-08-24 11:22:03 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.588: INFO: Container kube-proxy ready: true, restart count 0 +Aug 24 12:42:54.588: INFO: kube-scheduler-pe9deep4seen-2 from kube-system started at 2023-08-24 11:25:19 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.588: INFO: Container kube-scheduler ready: true, restart count 0 +Aug 24 12:42:54.588: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) +Aug 24 12:42:54.588: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 12:42:54.588: INFO: Container systemd-logs ready: true, restart count 0 +Aug 24 12:42:54.588: INFO: +Logging pods the apiserver thinks is on node pe9deep4seen-3 before test +Aug 24 12:42:54.608: INFO: cilium-node-init-pdcw9 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.609: INFO: Container node-init ready: true, restart count 0 +Aug 24 12:42:54.610: INFO: cilium-xgc44 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.610: INFO: Container cilium-agent ready: true, restart count 0 +Aug 24 12:42:54.610: INFO: kube-proxy-8vv8d from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.610: INFO: Container kube-proxy ready: true, restart count 0 +Aug 24 12:42:54.610: INFO: sonobuoy from sonobuoy started at 2023-08-24 11:38:19 +0000 UTC (1 container statuses recorded) +Aug 24 12:42:54.611: INFO: Container 
kube-sonobuoy ready: true, restart count 0 +Aug 24 12:42:54.611: INFO: sonobuoy-e2e-job-b3f52dde3e8a4a4e from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) +Aug 24 12:42:54.611: INFO: Container e2e ready: true, restart count 0 +Aug 24 12:42:54.611: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 12:42:54.611: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) +Aug 24 12:42:54.611: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 12:42:54.611: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates resource limits of pods that are allowed to run [Conformance] + test/e2e/scheduling/predicates.go:331 +STEP: verifying the node has the label node pe9deep4seen-1 08/24/23 12:42:54.68 +STEP: verifying the node has the label node pe9deep4seen-2 08/24/23 12:42:54.723 +STEP: verifying the node has the label node pe9deep4seen-3 08/24/23 12:42:54.748 +Aug 24 12:42:54.806: INFO: Pod cilium-node-init-95cbk requesting resource cpu=100m on Node pe9deep4seen-2 +Aug 24 12:42:54.806: INFO: Pod cilium-node-init-pdcw9 requesting resource cpu=100m on Node pe9deep4seen-3 +Aug 24 12:42:54.806: INFO: Pod cilium-node-init-wqpdx requesting resource cpu=100m on Node pe9deep4seen-1 +Aug 24 12:42:54.806: INFO: Pod cilium-operator-75f7897945-8qqz2 requesting resource cpu=0m on Node pe9deep4seen-2 +Aug 24 12:42:54.807: INFO: Pod cilium-rcknz requesting resource cpu=0m on Node pe9deep4seen-2 +Aug 24 12:42:54.807: INFO: Pod cilium-wpzgb requesting resource cpu=0m on Node pe9deep4seen-1 +Aug 24 12:42:54.807: INFO: Pod cilium-xgc44 requesting resource cpu=0m on Node pe9deep4seen-3 +Aug 24 12:42:54.807: INFO: Pod coredns-787d4945fb-8jnm5 requesting resource cpu=100m on Node pe9deep4seen-1 +Aug 24 12:42:54.807: INFO: Pod coredns-787d4945fb-d76z6 requesting resource cpu=100m on Node pe9deep4seen-1 +Aug 24 12:42:54.807: INFO: Pod kube-addon-manager-pe9deep4seen-1 requesting resource cpu=5m on Node pe9deep4seen-1 +Aug 24 12:42:54.807: INFO: Pod kube-addon-manager-pe9deep4seen-2 requesting resource cpu=5m on Node pe9deep4seen-2 +Aug 24 12:42:54.807: INFO: Pod kube-apiserver-pe9deep4seen-1 requesting resource cpu=250m on Node pe9deep4seen-1 +Aug 24 12:42:54.807: INFO: Pod kube-apiserver-pe9deep4seen-2 requesting resource cpu=250m on Node pe9deep4seen-2 +Aug 24 12:42:54.807: INFO: Pod kube-controller-manager-pe9deep4seen-1 requesting resource cpu=200m on Node pe9deep4seen-1 +Aug 24 12:42:54.807: INFO: Pod kube-controller-manager-pe9deep4seen-2 requesting resource cpu=200m on Node pe9deep4seen-2 +Aug 24 12:42:54.807: INFO: Pod kube-proxy-8vv8d requesting resource cpu=0m on Node pe9deep4seen-3 +Aug 24 12:42:54.808: INFO: Pod kube-proxy-lm2dm requesting resource cpu=0m on Node pe9deep4seen-2 +Aug 24 12:42:54.808: INFO: Pod kube-proxy-nr5bs requesting resource cpu=0m on Node pe9deep4seen-1 +Aug 24 12:42:54.812: INFO: Pod kube-scheduler-pe9deep4seen-1 requesting resource cpu=100m on Node pe9deep4seen-1 +Aug 24 12:42:54.812: INFO: Pod kube-scheduler-pe9deep4seen-2 requesting resource cpu=100m on Node pe9deep4seen-2 +Aug 24 12:42:54.812: INFO: Pod sonobuoy requesting resource cpu=0m on Node pe9deep4seen-3 +Aug 24 12:42:54.812: INFO: Pod sonobuoy-e2e-job-b3f52dde3e8a4a4e requesting resource cpu=0m on Node pe9deep4seen-3 +Aug 24 12:42:54.812: INFO: Pod sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw requesting resource cpu=0m on Node 
pe9deep4seen-1 +Aug 24 12:42:54.812: INFO: Pod sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl requesting resource cpu=0m on Node pe9deep4seen-2 +Aug 24 12:42:54.812: INFO: Pod sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 requesting resource cpu=0m on Node pe9deep4seen-3 +STEP: Starting Pods to consume most of the cluster CPU. 08/24/23 12:42:54.812 +Aug 24 12:42:54.813: INFO: Creating a pod which consumes cpu=521m on Node pe9deep4seen-1 +Aug 24 12:42:54.845: INFO: Creating a pod which consumes cpu=661m on Node pe9deep4seen-2 +Aug 24 12:42:54.865: INFO: Creating a pod which consumes cpu=1050m on Node pe9deep4seen-3 +Aug 24 12:42:54.887: INFO: Waiting up to 5m0s for pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb" in namespace "sched-pred-4161" to be "running" +Aug 24 12:42:54.901: INFO: Pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb": Phase="Pending", Reason="", readiness=false. Elapsed: 14.170357ms +Aug 24 12:42:56.910: INFO: Pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023291331s +Aug 24 12:42:58.909: INFO: Pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb": Phase="Running", Reason="", readiness=true. Elapsed: 4.022066517s +Aug 24 12:42:58.909: INFO: Pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb" satisfied condition "running" +Aug 24 12:42:58.909: INFO: Waiting up to 5m0s for pod "filler-pod-601963de-77da-4b3a-b77e-f271db96f627" in namespace "sched-pred-4161" to be "running" +Aug 24 12:42:58.916: INFO: Pod "filler-pod-601963de-77da-4b3a-b77e-f271db96f627": Phase="Running", Reason="", readiness=true. Elapsed: 6.579841ms +Aug 24 12:42:58.916: INFO: Pod "filler-pod-601963de-77da-4b3a-b77e-f271db96f627" satisfied condition "running" +Aug 24 12:42:58.916: INFO: Waiting up to 5m0s for pod "filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f" in namespace "sched-pred-4161" to be "running" +Aug 24 12:42:58.922: INFO: Pod "filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f": Phase="Running", Reason="", readiness=true. Elapsed: 6.178732ms +Aug 24 12:42:58.922: INFO: Pod "filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f" satisfied condition "running" +STEP: Creating another pod that requires unavailable amount of CPU. 
08/24/23 12:42:58.922 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-601963de-77da-4b3a-b77e-f271db96f627.177e52470b13ca2d], Reason = [Scheduled], Message = [Successfully assigned sched-pred-4161/filler-pod-601963de-77da-4b3a-b77e-f271db96f627 to pe9deep4seen-2] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-601963de-77da-4b3a-b77e-f271db96f627.177e524749338ffe], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-601963de-77da-4b3a-b77e-f271db96f627.177e52475757cb26], Reason = [Created], Message = [Created container filler-pod-601963de-77da-4b3a-b77e-f271db96f627] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-601963de-77da-4b3a-b77e-f271db96f627.177e52475b5d46b4], Reason = [Started], Message = [Started container filler-pod-601963de-77da-4b3a-b77e-f271db96f627] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f.177e52470e1f8444], Reason = [Scheduled], Message = [Successfully assigned sched-pred-4161/filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f to pe9deep4seen-3] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f.177e524754ae33c0], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f.177e524765cf3cc4], Reason = [Created], Message = [Created container filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f.177e524767170727], Reason = [Started], Message = [Started container filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e524707e3568a], Reason = [Scheduled], Message = [Successfully assigned sched-pred-4161/filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb to pe9deep4seen-1] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Warning], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e5247555f45ac], Reason = [FailedMount], Message = [MountVolume.SetUp failed for volume "kube-api-access-xlckz" : failed to sync configmap cache: timed out waiting for the condition] 08/24/23 12:42:58.933 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e5247a23ebd50], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 08/24/23 12:42:58.934 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e5247adc024de], Reason = [Created], Message = [Created container filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb] 08/24/23 12:42:58.934 +STEP: Considering event: +Type = [Normal], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e5247b038d325], Reason = [Started], Message = [Started container filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb] 08/24/23 12:42:58.934 +STEP: Considering event: +Type = [Warning], Name = [additional-pod.177e5247f858ea32], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 Insufficient cpu. 
preemption: 0/3 nodes are available: 3 No preemption victims found for incoming pod..] 08/24/23 12:42:58.955 +STEP: removing the label node off the node pe9deep4seen-1 08/24/23 12:42:59.958 +STEP: verifying the node doesn't have the label node 08/24/23 12:42:59.988 +STEP: removing the label node off the node pe9deep4seen-2 08/24/23 12:42:59.997 +STEP: verifying the node doesn't have the label node 08/24/23 12:43:00.036 +STEP: removing the label node off the node pe9deep4seen-3 08/24/23 12:43:00.045 +STEP: verifying the node doesn't have the label node 08/24/23 12:43:00.085 +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:35:27.581: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Security Context +Aug 24 12:43:00.173: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/scheduling/predicates.go:88 +[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Security Context +[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Security Context +[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "security-context-8668" for this suite. 07/29/23 16:35:27.591 +STEP: Destroying namespace "sched-pred-4161" for this suite. 08/24/23 12:43:00.196 ------------------------------ -• [4.198 seconds] -[sig-node] Security Context -test/e2e/node/framework.go:23 - should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] - test/e2e/node/security_context.go:129 +• [SLOW TEST] [5.777 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +test/e2e/scheduling/framework.go:40 + validates resource limits of pods that are allowed to run [Conformance] + test/e2e/scheduling/predicates.go:331 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Security Context + [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:35:23.42 - Jul 29 16:35:23.420: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename security-context 07/29/23 16:35:23.422 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:23.449 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:23.458 - [BeforeEach] [sig-node] Security Context + STEP: Creating a kubernetes client 08/24/23 12:42:54.437 + Aug 24 12:42:54.437: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename sched-pred 08/24/23 12:42:54.444 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:42:54.479 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:42:54.488 + [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] - test/e2e/node/security_context.go:129 - STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser 07/29/23 16:35:23.464 - Jul 29 16:35:23.478: INFO: Waiting up to 5m0s for pod 
"security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39" in namespace "security-context-8668" to be "Succeeded or Failed" - Jul 29 16:35:23.488: INFO: Pod "security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39": Phase="Pending", Reason="", readiness=false. Elapsed: 9.606733ms - Jul 29 16:35:25.496: INFO: Pod "security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017775212s - Jul 29 16:35:27.498: INFO: Pod "security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019878815s - STEP: Saw pod success 07/29/23 16:35:27.498 - Jul 29 16:35:27.499: INFO: Pod "security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39" satisfied condition "Succeeded or Failed" - Jul 29 16:35:27.509: INFO: Trying to get logs from node wetuj3nuajog-3 pod security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39 container test-container: - STEP: delete the pod 07/29/23 16:35:27.525 - Jul 29 16:35:27.573: INFO: Waiting for pod security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39 to disappear - Jul 29 16:35:27.580: INFO: Pod security-context-d01dbc5a-40dd-490e-8aca-c3ea9f5efc39 no longer exists - [AfterEach] [sig-node] Security Context + [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/scheduling/predicates.go:97 + Aug 24 12:42:54.496: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready + Aug 24 12:42:54.524: INFO: Waiting for terminating namespaces to be deleted... + Aug 24 12:42:54.532: INFO: + Logging pods the apiserver thinks is on node pe9deep4seen-1 before test + Aug 24 12:42:54.565: INFO: cilium-node-init-wqpdx from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.565: INFO: Container node-init ready: true, restart count 0 + Aug 24 12:42:54.565: INFO: cilium-wpzgb from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.565: INFO: Container cilium-agent ready: true, restart count 0 + Aug 24 12:42:54.565: INFO: coredns-787d4945fb-8jnm5 from kube-system started at 2023-08-24 11:24:04 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.565: INFO: Container coredns ready: true, restart count 0 + Aug 24 12:42:54.565: INFO: coredns-787d4945fb-d76z6 from kube-system started at 2023-08-24 11:24:07 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.565: INFO: Container coredns ready: true, restart count 0 + Aug 24 12:42:54.565: INFO: kube-addon-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.565: INFO: Container kube-addon-manager ready: true, restart count 0 + Aug 24 12:42:54.566: INFO: kube-apiserver-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.566: INFO: Container kube-apiserver ready: true, restart count 0 + Aug 24 12:42:54.566: INFO: kube-controller-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.566: INFO: Container kube-controller-manager ready: true, restart count 0 + Aug 24 12:42:54.566: INFO: kube-proxy-nr5bs from kube-system started at 2023-08-24 11:21:24 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.566: INFO: Container kube-proxy ready: true, restart count 0 + Aug 24 12:42:54.566: INFO: kube-scheduler-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) 
+ Aug 24 12:42:54.566: INFO: Container kube-scheduler ready: true, restart count 0 + Aug 24 12:42:54.566: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) + Aug 24 12:42:54.566: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 12:42:54.566: INFO: Container systemd-logs ready: true, restart count 0 + Aug 24 12:42:54.566: INFO: + Logging pods the apiserver thinks is on node pe9deep4seen-2 before test + Aug 24 12:42:54.586: INFO: cilium-node-init-95cbk from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.587: INFO: Container node-init ready: true, restart count 0 + Aug 24 12:42:54.587: INFO: cilium-operator-75f7897945-8qqz2 from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.587: INFO: Container cilium-operator ready: true, restart count 0 + Aug 24 12:42:54.587: INFO: cilium-rcknz from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.587: INFO: Container cilium-agent ready: true, restart count 0 + Aug 24 12:42:54.587: INFO: kube-addon-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:37 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.587: INFO: Container kube-addon-manager ready: true, restart count 0 + Aug 24 12:42:54.587: INFO: kube-apiserver-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.587: INFO: Container kube-apiserver ready: true, restart count 0 + Aug 24 12:42:54.588: INFO: kube-controller-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.588: INFO: Container kube-controller-manager ready: true, restart count 0 + Aug 24 12:42:54.588: INFO: kube-proxy-lm2dm from kube-system started at 2023-08-24 11:22:03 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.588: INFO: Container kube-proxy ready: true, restart count 0 + Aug 24 12:42:54.588: INFO: kube-scheduler-pe9deep4seen-2 from kube-system started at 2023-08-24 11:25:19 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.588: INFO: Container kube-scheduler ready: true, restart count 0 + Aug 24 12:42:54.588: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) + Aug 24 12:42:54.588: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 12:42:54.588: INFO: Container systemd-logs ready: true, restart count 0 + Aug 24 12:42:54.588: INFO: + Logging pods the apiserver thinks is on node pe9deep4seen-3 before test + Aug 24 12:42:54.608: INFO: cilium-node-init-pdcw9 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.609: INFO: Container node-init ready: true, restart count 0 + Aug 24 12:42:54.610: INFO: cilium-xgc44 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.610: INFO: Container cilium-agent ready: true, restart count 0 + Aug 24 12:42:54.610: INFO: kube-proxy-8vv8d from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) + Aug 24 12:42:54.610: INFO: Container kube-proxy ready: true, restart count 0 + Aug 24 12:42:54.610: INFO: sonobuoy from sonobuoy started at 2023-08-24 11:38:19 +0000 UTC (1 container 
statuses recorded) + Aug 24 12:42:54.611: INFO: Container kube-sonobuoy ready: true, restart count 0 + Aug 24 12:42:54.611: INFO: sonobuoy-e2e-job-b3f52dde3e8a4a4e from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) + Aug 24 12:42:54.611: INFO: Container e2e ready: true, restart count 0 + Aug 24 12:42:54.611: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 12:42:54.611: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) + Aug 24 12:42:54.611: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 12:42:54.611: INFO: Container systemd-logs ready: true, restart count 0 + [It] validates resource limits of pods that are allowed to run [Conformance] + test/e2e/scheduling/predicates.go:331 + STEP: verifying the node has the label node pe9deep4seen-1 08/24/23 12:42:54.68 + STEP: verifying the node has the label node pe9deep4seen-2 08/24/23 12:42:54.723 + STEP: verifying the node has the label node pe9deep4seen-3 08/24/23 12:42:54.748 + Aug 24 12:42:54.806: INFO: Pod cilium-node-init-95cbk requesting resource cpu=100m on Node pe9deep4seen-2 + Aug 24 12:42:54.806: INFO: Pod cilium-node-init-pdcw9 requesting resource cpu=100m on Node pe9deep4seen-3 + Aug 24 12:42:54.806: INFO: Pod cilium-node-init-wqpdx requesting resource cpu=100m on Node pe9deep4seen-1 + Aug 24 12:42:54.806: INFO: Pod cilium-operator-75f7897945-8qqz2 requesting resource cpu=0m on Node pe9deep4seen-2 + Aug 24 12:42:54.807: INFO: Pod cilium-rcknz requesting resource cpu=0m on Node pe9deep4seen-2 + Aug 24 12:42:54.807: INFO: Pod cilium-wpzgb requesting resource cpu=0m on Node pe9deep4seen-1 + Aug 24 12:42:54.807: INFO: Pod cilium-xgc44 requesting resource cpu=0m on Node pe9deep4seen-3 + Aug 24 12:42:54.807: INFO: Pod coredns-787d4945fb-8jnm5 requesting resource cpu=100m on Node pe9deep4seen-1 + Aug 24 12:42:54.807: INFO: Pod coredns-787d4945fb-d76z6 requesting resource cpu=100m on Node pe9deep4seen-1 + Aug 24 12:42:54.807: INFO: Pod kube-addon-manager-pe9deep4seen-1 requesting resource cpu=5m on Node pe9deep4seen-1 + Aug 24 12:42:54.807: INFO: Pod kube-addon-manager-pe9deep4seen-2 requesting resource cpu=5m on Node pe9deep4seen-2 + Aug 24 12:42:54.807: INFO: Pod kube-apiserver-pe9deep4seen-1 requesting resource cpu=250m on Node pe9deep4seen-1 + Aug 24 12:42:54.807: INFO: Pod kube-apiserver-pe9deep4seen-2 requesting resource cpu=250m on Node pe9deep4seen-2 + Aug 24 12:42:54.807: INFO: Pod kube-controller-manager-pe9deep4seen-1 requesting resource cpu=200m on Node pe9deep4seen-1 + Aug 24 12:42:54.807: INFO: Pod kube-controller-manager-pe9deep4seen-2 requesting resource cpu=200m on Node pe9deep4seen-2 + Aug 24 12:42:54.807: INFO: Pod kube-proxy-8vv8d requesting resource cpu=0m on Node pe9deep4seen-3 + Aug 24 12:42:54.808: INFO: Pod kube-proxy-lm2dm requesting resource cpu=0m on Node pe9deep4seen-2 + Aug 24 12:42:54.808: INFO: Pod kube-proxy-nr5bs requesting resource cpu=0m on Node pe9deep4seen-1 + Aug 24 12:42:54.812: INFO: Pod kube-scheduler-pe9deep4seen-1 requesting resource cpu=100m on Node pe9deep4seen-1 + Aug 24 12:42:54.812: INFO: Pod kube-scheduler-pe9deep4seen-2 requesting resource cpu=100m on Node pe9deep4seen-2 + Aug 24 12:42:54.812: INFO: Pod sonobuoy requesting resource cpu=0m on Node pe9deep4seen-3 + Aug 24 12:42:54.812: INFO: Pod sonobuoy-e2e-job-b3f52dde3e8a4a4e requesting resource cpu=0m on Node pe9deep4seen-3 + Aug 24 12:42:54.812: INFO: Pod 
sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw requesting resource cpu=0m on Node pe9deep4seen-1 + Aug 24 12:42:54.812: INFO: Pod sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl requesting resource cpu=0m on Node pe9deep4seen-2 + Aug 24 12:42:54.812: INFO: Pod sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 requesting resource cpu=0m on Node pe9deep4seen-3 + STEP: Starting Pods to consume most of the cluster CPU. 08/24/23 12:42:54.812 + Aug 24 12:42:54.813: INFO: Creating a pod which consumes cpu=521m on Node pe9deep4seen-1 + Aug 24 12:42:54.845: INFO: Creating a pod which consumes cpu=661m on Node pe9deep4seen-2 + Aug 24 12:42:54.865: INFO: Creating a pod which consumes cpu=1050m on Node pe9deep4seen-3 + Aug 24 12:42:54.887: INFO: Waiting up to 5m0s for pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb" in namespace "sched-pred-4161" to be "running" + Aug 24 12:42:54.901: INFO: Pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb": Phase="Pending", Reason="", readiness=false. Elapsed: 14.170357ms + Aug 24 12:42:56.910: INFO: Pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023291331s + Aug 24 12:42:58.909: INFO: Pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb": Phase="Running", Reason="", readiness=true. Elapsed: 4.022066517s + Aug 24 12:42:58.909: INFO: Pod "filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb" satisfied condition "running" + Aug 24 12:42:58.909: INFO: Waiting up to 5m0s for pod "filler-pod-601963de-77da-4b3a-b77e-f271db96f627" in namespace "sched-pred-4161" to be "running" + Aug 24 12:42:58.916: INFO: Pod "filler-pod-601963de-77da-4b3a-b77e-f271db96f627": Phase="Running", Reason="", readiness=true. Elapsed: 6.579841ms + Aug 24 12:42:58.916: INFO: Pod "filler-pod-601963de-77da-4b3a-b77e-f271db96f627" satisfied condition "running" + Aug 24 12:42:58.916: INFO: Waiting up to 5m0s for pod "filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f" in namespace "sched-pred-4161" to be "running" + Aug 24 12:42:58.922: INFO: Pod "filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f": Phase="Running", Reason="", readiness=true. Elapsed: 6.178732ms + Aug 24 12:42:58.922: INFO: Pod "filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f" satisfied condition "running" + STEP: Creating another pod that requires unavailable amount of CPU. 
08/24/23 12:42:58.922 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-601963de-77da-4b3a-b77e-f271db96f627.177e52470b13ca2d], Reason = [Scheduled], Message = [Successfully assigned sched-pred-4161/filler-pod-601963de-77da-4b3a-b77e-f271db96f627 to pe9deep4seen-2] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-601963de-77da-4b3a-b77e-f271db96f627.177e524749338ffe], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-601963de-77da-4b3a-b77e-f271db96f627.177e52475757cb26], Reason = [Created], Message = [Created container filler-pod-601963de-77da-4b3a-b77e-f271db96f627] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-601963de-77da-4b3a-b77e-f271db96f627.177e52475b5d46b4], Reason = [Started], Message = [Started container filler-pod-601963de-77da-4b3a-b77e-f271db96f627] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f.177e52470e1f8444], Reason = [Scheduled], Message = [Successfully assigned sched-pred-4161/filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f to pe9deep4seen-3] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f.177e524754ae33c0], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f.177e524765cf3cc4], Reason = [Created], Message = [Created container filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f.177e524767170727], Reason = [Started], Message = [Started container filler-pod-afff7594-3235-4b2c-8dc2-7226abf72f4f] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e524707e3568a], Reason = [Scheduled], Message = [Successfully assigned sched-pred-4161/filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb to pe9deep4seen-1] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Warning], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e5247555f45ac], Reason = [FailedMount], Message = [MountVolume.SetUp failed for volume "kube-api-access-xlckz" : failed to sync configmap cache: timed out waiting for the condition] 08/24/23 12:42:58.933 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e5247a23ebd50], Reason = [Pulled], Message = [Container image "registry.k8s.io/pause:3.9" already present on machine] 08/24/23 12:42:58.934 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e5247adc024de], Reason = [Created], Message = [Created container filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb] 08/24/23 12:42:58.934 + STEP: Considering event: + Type = [Normal], Name = [filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb.177e5247b038d325], Reason = [Started], Message = [Started container filler-pod-f0b264fb-b5e6-4a9e-86b2-1704662572fb] 08/24/23 12:42:58.934 + STEP: Considering event: + Type = [Warning], Name = [additional-pod.177e5247f858ea32], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 Insufficient cpu. 
preemption: 0/3 nodes are available: 3 No preemption victims found for incoming pod..] 08/24/23 12:42:58.955 + STEP: removing the label node off the node pe9deep4seen-1 08/24/23 12:42:59.958 + STEP: verifying the node doesn't have the label node 08/24/23 12:42:59.988 + STEP: removing the label node off the node pe9deep4seen-2 08/24/23 12:42:59.997 + STEP: verifying the node doesn't have the label node 08/24/23 12:43:00.036 + STEP: removing the label node off the node pe9deep4seen-3 08/24/23 12:43:00.045 + STEP: verifying the node doesn't have the label node 08/24/23 12:43:00.085 + [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 16:35:27.581: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Security Context + Aug 24 12:43:00.173: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/scheduling/predicates.go:88 + [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Security Context + [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Security Context + [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "security-context-8668" for this suite. 07/29/23 16:35:27.591 + STEP: Destroying namespace "sched-pred-4161" for this suite. 08/24/23 12:43:00.196 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] ReplicaSet - should list and delete a collection of ReplicaSets [Conformance] - test/e2e/apps/replica_set.go:165 -[BeforeEach] [sig-apps] ReplicaSet +[sig-node] Kubelet when scheduling an agnhost Pod with hostAliases + should write entries to /etc/hosts [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:148 +[BeforeEach] [sig-node] Kubelet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:35:27.645 -Jul 29 16:35:27.646: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replicaset 07/29/23 16:35:27.648 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:27.715 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:27.723 -[BeforeEach] [sig-apps] ReplicaSet +STEP: Creating a kubernetes client 08/24/23 12:43:00.232 +Aug 24 12:43:00.233: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubelet-test 08/24/23 12:43:00.236 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:00.282 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:00.292 +[BeforeEach] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:31 -[It] should list and delete a collection of ReplicaSets [Conformance] - test/e2e/apps/replica_set.go:165 -STEP: Create a ReplicaSet 07/29/23 16:35:27.731 -STEP: Verify that the required pods have come up 07/29/23 16:35:27.756 -Jul 29 16:35:27.765: INFO: Pod name sample-pod: Found 0 pods out of 3 -Jul 29 16:35:32.778: INFO: Pod name sample-pod: Found 3 pods out of 3 -STEP: ensuring each pod is running 
07/29/23 16:35:32.778 -Jul 29 16:35:32.788: INFO: Replica Status: {Replicas:3 FullyLabeledReplicas:3 ReadyReplicas:3 AvailableReplicas:3 ObservedGeneration:1 Conditions:[]} -STEP: Listing all ReplicaSets 07/29/23 16:35:32.788 -STEP: DeleteCollection of the ReplicaSets 07/29/23 16:35:32.799 -STEP: After DeleteCollection verify that ReplicaSets have been deleted 07/29/23 16:35:32.821 -[AfterEach] [sig-apps] ReplicaSet +[BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 +[It] should write entries to /etc/hosts [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:148 +STEP: Waiting for pod completion 08/24/23 12:43:00.324 +Aug 24 12:43:00.325: INFO: Waiting up to 3m0s for pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524" in namespace "kubelet-test-5915" to be "completed" +Aug 24 12:43:00.342: INFO: Pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524": Phase="Pending", Reason="", readiness=false. Elapsed: 17.285924ms +Aug 24 12:43:02.354: INFO: Pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524": Phase="Pending", Reason="", readiness=false. Elapsed: 2.028509458s +Aug 24 12:43:04.352: INFO: Pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.026690629s +Aug 24 12:43:04.352: INFO: Pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524" satisfied condition "completed" +[AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 -Jul 29 16:35:32.830: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] ReplicaSet +Aug 24 12:43:04.367: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] ReplicaSet +[DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] ReplicaSet +[DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 -STEP: Destroying namespace "replicaset-5042" for this suite. 07/29/23 16:35:32.84 +STEP: Destroying namespace "kubelet-test-5915" for this suite. 
08/24/23 12:43:04.383 ------------------------------ -• [SLOW TEST] [5.295 seconds] -[sig-apps] ReplicaSet -test/e2e/apps/framework.go:23 - should list and delete a collection of ReplicaSets [Conformance] - test/e2e/apps/replica_set.go:165 +• [4.168 seconds] +[sig-node] Kubelet +test/e2e/common/node/framework.go:23 + when scheduling an agnhost Pod with hostAliases + test/e2e/common/node/kubelet.go:140 + should write entries to /etc/hosts [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:148 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] ReplicaSet + [BeforeEach] [sig-node] Kubelet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:35:27.645 - Jul 29 16:35:27.646: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replicaset 07/29/23 16:35:27.648 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:27.715 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:27.723 - [BeforeEach] [sig-apps] ReplicaSet + STEP: Creating a kubernetes client 08/24/23 12:43:00.232 + Aug 24 12:43:00.233: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubelet-test 08/24/23 12:43:00.236 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:00.282 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:00.292 + [BeforeEach] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:31 - [It] should list and delete a collection of ReplicaSets [Conformance] - test/e2e/apps/replica_set.go:165 - STEP: Create a ReplicaSet 07/29/23 16:35:27.731 - STEP: Verify that the required pods have come up 07/29/23 16:35:27.756 - Jul 29 16:35:27.765: INFO: Pod name sample-pod: Found 0 pods out of 3 - Jul 29 16:35:32.778: INFO: Pod name sample-pod: Found 3 pods out of 3 - STEP: ensuring each pod is running 07/29/23 16:35:32.778 - Jul 29 16:35:32.788: INFO: Replica Status: {Replicas:3 FullyLabeledReplicas:3 ReadyReplicas:3 AvailableReplicas:3 ObservedGeneration:1 Conditions:[]} - STEP: Listing all ReplicaSets 07/29/23 16:35:32.788 - STEP: DeleteCollection of the ReplicaSets 07/29/23 16:35:32.799 - STEP: After DeleteCollection verify that ReplicaSets have been deleted 07/29/23 16:35:32.821 - [AfterEach] [sig-apps] ReplicaSet + [BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 + [It] should write entries to /etc/hosts [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:148 + STEP: Waiting for pod completion 08/24/23 12:43:00.324 + Aug 24 12:43:00.325: INFO: Waiting up to 3m0s for pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524" in namespace "kubelet-test-5915" to be "completed" + Aug 24 12:43:00.342: INFO: Pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524": Phase="Pending", Reason="", readiness=false. Elapsed: 17.285924ms + Aug 24 12:43:02.354: INFO: Pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524": Phase="Pending", Reason="", readiness=false. Elapsed: 2.028509458s + Aug 24 12:43:04.352: INFO: Pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.026690629s + Aug 24 12:43:04.352: INFO: Pod "agnhost-host-aliasesefcf80d1-050b-4190-a964-80706e658524" satisfied condition "completed" + [AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 - Jul 29 16:35:32.830: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] ReplicaSet + Aug 24 12:43:04.367: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] ReplicaSet + [DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] ReplicaSet + [DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 - STEP: Destroying namespace "replicaset-5042" for this suite. 07/29/23 16:35:32.84 + STEP: Destroying namespace "kubelet-test-5915" for this suite. 08/24/23 12:43:04.383 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-node] Secrets - should fail to create secret due to empty secret key [Conformance] - test/e2e/common/node/secrets.go:140 + should patch a secret [Conformance] + test/e2e/common/node/secrets.go:154 [BeforeEach] [sig-node] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:35:32.943 -Jul 29 16:35:32.943: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename secrets 07/29/23 16:35:32.947 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:33.065 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:33.088 +STEP: Creating a kubernetes client 08/24/23 12:43:04.416 +Aug 24 12:43:04.417: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 12:43:04.42 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:04.45 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:04.456 [BeforeEach] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:31 -[It] should fail to create secret due to empty secret key [Conformance] - test/e2e/common/node/secrets.go:140 -STEP: Creating projection with secret that has name secret-emptykey-test-56477b43-2289-46bc-af7e-cbae1d94e20e 07/29/23 16:35:33.099 +[It] should patch a secret [Conformance] + test/e2e/common/node/secrets.go:154 +STEP: creating a secret 08/24/23 12:43:04.462 +STEP: listing secrets in all namespaces to ensure that there are more than zero 08/24/23 12:43:04.473 +STEP: patching the secret 08/24/23 12:43:04.48 +STEP: deleting the secret using a LabelSelector 08/24/23 12:43:04.501 +STEP: listing secrets in all namespaces, searching for label name and value in patch 08/24/23 12:43:04.532 [AfterEach] [sig-node] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 16:35:33.105: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:43:04.539: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] Secrets dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-node] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-3662" for this suite. 07/29/23 16:35:33.117 +STEP: Destroying namespace "secrets-5976" for this suite. 
08/24/23 12:43:04.546 ------------------------------ -• [0.203 seconds] +• [0.153 seconds] [sig-node] Secrets test/e2e/common/node/framework.go:23 - should fail to create secret due to empty secret key [Conformance] - test/e2e/common/node/secrets.go:140 + should patch a secret [Conformance] + test/e2e/common/node/secrets.go:154 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-node] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:35:32.943 - Jul 29 16:35:32.943: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 16:35:32.947 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:33.065 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:33.088 + STEP: Creating a kubernetes client 08/24/23 12:43:04.416 + Aug 24 12:43:04.417: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 12:43:04.42 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:04.45 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:04.456 [BeforeEach] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:31 - [It] should fail to create secret due to empty secret key [Conformance] - test/e2e/common/node/secrets.go:140 - STEP: Creating projection with secret that has name secret-emptykey-test-56477b43-2289-46bc-af7e-cbae1d94e20e 07/29/23 16:35:33.099 + [It] should patch a secret [Conformance] + test/e2e/common/node/secrets.go:154 + STEP: creating a secret 08/24/23 12:43:04.462 + STEP: listing secrets in all namespaces to ensure that there are more than zero 08/24/23 12:43:04.473 + STEP: patching the secret 08/24/23 12:43:04.48 + STEP: deleting the secret using a LabelSelector 08/24/23 12:43:04.501 + STEP: listing secrets in all namespaces, searching for label name and value in patch 08/24/23 12:43:04.532 [AfterEach] [sig-node] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 16:35:33.105: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:43:04.539: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] Secrets dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-node] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-3662" for this suite. 07/29/23 16:35:33.117 + STEP: Destroying namespace "secrets-5976" for this suite. 
08/24/23 12:43:04.546 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:47 -[BeforeEach] [sig-storage] ConfigMap +[sig-node] Containers + should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:59 +[BeforeEach] [sig-node] Containers set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:35:33.148 -Jul 29 16:35:33.148: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:35:33.151 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:33.187 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:33.193 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 12:43:04.567 +Aug 24 12:43:04.568: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename containers 08/24/23 12:43:04.57 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:04.602 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:04.609 +[BeforeEach] [sig-node] Containers test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:47 -STEP: Creating configMap with name configmap-test-volume-3800e4fa-0ee5-4e8a-a12c-b504ea363898 07/29/23 16:35:33.202 -STEP: Creating a pod to test consume configMaps 07/29/23 16:35:33.223 -Jul 29 16:35:33.252: INFO: Waiting up to 5m0s for pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4" in namespace "configmap-7305" to be "Succeeded or Failed" -Jul 29 16:35:33.268: INFO: Pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4": Phase="Pending", Reason="", readiness=false. Elapsed: 15.403701ms -Jul 29 16:35:35.276: INFO: Pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023298592s -Jul 29 16:35:37.277: INFO: Pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.024514602s -STEP: Saw pod success 07/29/23 16:35:37.277 -Jul 29 16:35:37.278: INFO: Pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4" satisfied condition "Succeeded or Failed" -Jul 29 16:35:37.285: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4 container agnhost-container: -STEP: delete the pod 07/29/23 16:35:37.297 -Jul 29 16:35:37.325: INFO: Waiting for pod pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4 to disappear -Jul 29 16:35:37.331: INFO: Pod pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4 no longer exists -[AfterEach] [sig-storage] ConfigMap +[It] should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:59 +STEP: Creating a pod to test override arguments 08/24/23 12:43:04.614 +Aug 24 12:43:04.633: INFO: Waiting up to 5m0s for pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77" in namespace "containers-8272" to be "Succeeded or Failed" +Aug 24 12:43:04.645: INFO: Pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77": Phase="Pending", Reason="", readiness=false. Elapsed: 11.764151ms +Aug 24 12:43:06.656: INFO: Pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022686723s +Aug 24 12:43:08.651: INFO: Pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017847416s +STEP: Saw pod success 08/24/23 12:43:08.651 +Aug 24 12:43:08.652: INFO: Pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77" satisfied condition "Succeeded or Failed" +Aug 24 12:43:08.658: INFO: Trying to get logs from node pe9deep4seen-3 pod client-containers-4f406389-df77-4fb5-86de-01dcee496d77 container agnhost-container: +STEP: delete the pod 08/24/23 12:43:08.669 +Aug 24 12:43:08.687: INFO: Waiting for pod client-containers-4f406389-df77-4fb5-86de-01dcee496d77 to disappear +Aug 24 12:43:08.691: INFO: Pod client-containers-4f406389-df77-4fb5-86de-01dcee496d77 no longer exists +[AfterEach] [sig-node] Containers test/e2e/framework/node/init/init.go:32 -Jul 29 16:35:37.331: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 12:43:08.692: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Containers test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] Containers dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] Containers tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-7305" for this suite. 07/29/23 16:35:37.34 +STEP: Destroying namespace "containers-8272" for this suite. 
08/24/23 12:43:08.7 ------------------------------ -• [4.215 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:47 +• [4.146 seconds] +[sig-node] Containers +test/e2e/common/node/framework.go:23 + should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:59 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-node] Containers set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:35:33.148 - Jul 29 16:35:33.148: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:35:33.151 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:33.187 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:33.193 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 12:43:04.567 + Aug 24 12:43:04.568: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename containers 08/24/23 12:43:04.57 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:04.602 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:04.609 + [BeforeEach] [sig-node] Containers test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:47 - STEP: Creating configMap with name configmap-test-volume-3800e4fa-0ee5-4e8a-a12c-b504ea363898 07/29/23 16:35:33.202 - STEP: Creating a pod to test consume configMaps 07/29/23 16:35:33.223 - Jul 29 16:35:33.252: INFO: Waiting up to 5m0s for pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4" in namespace "configmap-7305" to be "Succeeded or Failed" - Jul 29 16:35:33.268: INFO: Pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4": Phase="Pending", Reason="", readiness=false. Elapsed: 15.403701ms - Jul 29 16:35:35.276: INFO: Pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023298592s - Jul 29 16:35:37.277: INFO: Pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.024514602s - STEP: Saw pod success 07/29/23 16:35:37.277 - Jul 29 16:35:37.278: INFO: Pod "pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4" satisfied condition "Succeeded or Failed" - Jul 29 16:35:37.285: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4 container agnhost-container: - STEP: delete the pod 07/29/23 16:35:37.297 - Jul 29 16:35:37.325: INFO: Waiting for pod pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4 to disappear - Jul 29 16:35:37.331: INFO: Pod pod-configmaps-4ed0c9cf-aadb-4a15-ad53-6b7b5b6a6bc4 no longer exists - [AfterEach] [sig-storage] ConfigMap + [It] should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:59 + STEP: Creating a pod to test override arguments 08/24/23 12:43:04.614 + Aug 24 12:43:04.633: INFO: Waiting up to 5m0s for pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77" in namespace "containers-8272" to be "Succeeded or Failed" + Aug 24 12:43:04.645: INFO: Pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77": Phase="Pending", Reason="", readiness=false. Elapsed: 11.764151ms + Aug 24 12:43:06.656: INFO: Pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022686723s + Aug 24 12:43:08.651: INFO: Pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017847416s + STEP: Saw pod success 08/24/23 12:43:08.651 + Aug 24 12:43:08.652: INFO: Pod "client-containers-4f406389-df77-4fb5-86de-01dcee496d77" satisfied condition "Succeeded or Failed" + Aug 24 12:43:08.658: INFO: Trying to get logs from node pe9deep4seen-3 pod client-containers-4f406389-df77-4fb5-86de-01dcee496d77 container agnhost-container: + STEP: delete the pod 08/24/23 12:43:08.669 + Aug 24 12:43:08.687: INFO: Waiting for pod client-containers-4f406389-df77-4fb5-86de-01dcee496d77 to disappear + Aug 24 12:43:08.691: INFO: Pod client-containers-4f406389-df77-4fb5-86de-01dcee496d77 no longer exists + [AfterEach] [sig-node] Containers test/e2e/framework/node/init/init.go:32 - Jul 29 16:35:37.331: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 12:43:08.692: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Containers test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] Containers dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] Containers tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-7305" for this suite. 07/29/23 16:35:37.34 + STEP: Destroying namespace "containers-8272" for this suite. 
08/24/23 12:43:08.7 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSS ------------------------------ -[sig-node] Probing container - should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:169 -[BeforeEach] [sig-node] Probing container +[sig-network] Proxy version v1 + A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] + test/e2e/network/proxy.go:286 +[BeforeEach] version v1 set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:35:37.375 -Jul 29 16:35:37.375: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-probe 07/29/23 16:35:37.377 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:37.413 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:37.426 -[BeforeEach] [sig-node] Probing container +STEP: Creating a kubernetes client 08/24/23 12:43:08.719 +Aug 24 12:43:08.720: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename proxy 08/24/23 12:43:08.722 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:08.75 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:08.757 +[BeforeEach] version v1 test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 -[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:169 -STEP: Creating pod liveness-44e0d424-c2db-4164-8020-f3236bdc2374 in namespace container-probe-3284 07/29/23 16:35:37.432 -Jul 29 16:35:37.445: INFO: Waiting up to 5m0s for pod "liveness-44e0d424-c2db-4164-8020-f3236bdc2374" in namespace "container-probe-3284" to be "not pending" -Jul 29 16:35:37.450: INFO: Pod "liveness-44e0d424-c2db-4164-8020-f3236bdc2374": Phase="Pending", Reason="", readiness=false. Elapsed: 5.226479ms -Jul 29 16:35:39.459: INFO: Pod "liveness-44e0d424-c2db-4164-8020-f3236bdc2374": Phase="Running", Reason="", readiness=true. Elapsed: 2.01404571s -Jul 29 16:35:39.459: INFO: Pod "liveness-44e0d424-c2db-4164-8020-f3236bdc2374" satisfied condition "not pending" -Jul 29 16:35:39.459: INFO: Started pod liveness-44e0d424-c2db-4164-8020-f3236bdc2374 in namespace container-probe-3284 -STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 16:35:39.459 -Jul 29 16:35:39.465: INFO: Initial restart count of pod liveness-44e0d424-c2db-4164-8020-f3236bdc2374 is 0 -Jul 29 16:35:59.578: INFO: Restart count of pod container-probe-3284/liveness-44e0d424-c2db-4164-8020-f3236bdc2374 is now 1 (20.112665453s elapsed) -STEP: deleting the pod 07/29/23 16:35:59.578 -[AfterEach] [sig-node] Probing container +[It] A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] + test/e2e/network/proxy.go:286 +Aug 24 12:43:08.762: INFO: Creating pod... +Aug 24 12:43:08.781: INFO: Waiting up to 5m0s for pod "agnhost" in namespace "proxy-6133" to be "running" +Aug 24 12:43:08.794: INFO: Pod "agnhost": Phase="Pending", Reason="", readiness=false. Elapsed: 13.631026ms +Aug 24 12:43:10.801: INFO: Pod "agnhost": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.019921321s +Aug 24 12:43:10.801: INFO: Pod "agnhost" satisfied condition "running" +Aug 24 12:43:10.801: INFO: Creating service... +Aug 24 12:43:10.818: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/DELETE +Aug 24 12:43:10.835: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE +Aug 24 12:43:10.836: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/GET +Aug 24 12:43:10.845: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET +Aug 24 12:43:10.846: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/HEAD +Aug 24 12:43:10.852: INFO: http.Client request:HEAD | StatusCode:200 +Aug 24 12:43:10.853: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/OPTIONS +Aug 24 12:43:10.859: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS +Aug 24 12:43:10.859: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/PATCH +Aug 24 12:43:10.865: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH +Aug 24 12:43:10.865: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/POST +Aug 24 12:43:10.878: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST +Aug 24 12:43:10.879: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/PUT +Aug 24 12:43:10.887: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT +Aug 24 12:43:10.887: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/DELETE +Aug 24 12:43:10.897: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE +Aug 24 12:43:10.897: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/GET +Aug 24 12:43:10.911: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET +Aug 24 12:43:10.911: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/HEAD +Aug 24 12:43:10.922: INFO: http.Client request:HEAD | StatusCode:200 +Aug 24 12:43:10.922: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/OPTIONS +Aug 24 12:43:10.933: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS +Aug 24 12:43:10.933: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/PATCH +Aug 24 12:43:10.943: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH +Aug 24 12:43:10.943: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/POST +Aug 24 12:43:10.961: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST +Aug 24 12:43:10.961: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/PUT +Aug 24 12:43:10.976: INFO: http.Client request:PUT | StatusCode:200 | 
Response:foo | Method:PUT +[AfterEach] version v1 test/e2e/framework/node/init/init.go:32 -Jul 29 16:35:59.601: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Probing container +Aug 24 12:43:10.977: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] version v1 test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Probing container +[DeferCleanup (Each)] version v1 dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Probing container +[DeferCleanup (Each)] version v1 tear down framework | framework.go:193 -STEP: Destroying namespace "container-probe-3284" for this suite. 07/29/23 16:35:59.63 +STEP: Destroying namespace "proxy-6133" for this suite. 08/24/23 12:43:10.985 ------------------------------ -• [SLOW TEST] [22.270 seconds] -[sig-node] Probing container -test/e2e/common/node/framework.go:23 - should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:169 +• [2.280 seconds] +[sig-network] Proxy +test/e2e/network/common/framework.go:23 + version v1 + test/e2e/network/proxy.go:74 + A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] + test/e2e/network/proxy.go:286 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Probing container + [BeforeEach] version v1 set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:35:37.375 - Jul 29 16:35:37.375: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-probe 07/29/23 16:35:37.377 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:37.413 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:37.426 - [BeforeEach] [sig-node] Probing container + STEP: Creating a kubernetes client 08/24/23 12:43:08.719 + Aug 24 12:43:08.720: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename proxy 08/24/23 12:43:08.722 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:08.75 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:08.757 + [BeforeEach] version v1 test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 - [It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:169 - STEP: Creating pod liveness-44e0d424-c2db-4164-8020-f3236bdc2374 in namespace container-probe-3284 07/29/23 16:35:37.432 - Jul 29 16:35:37.445: INFO: Waiting up to 5m0s for pod "liveness-44e0d424-c2db-4164-8020-f3236bdc2374" in namespace "container-probe-3284" to be "not pending" - Jul 29 16:35:37.450: INFO: Pod "liveness-44e0d424-c2db-4164-8020-f3236bdc2374": Phase="Pending", Reason="", readiness=false. Elapsed: 5.226479ms - Jul 29 16:35:39.459: INFO: Pod "liveness-44e0d424-c2db-4164-8020-f3236bdc2374": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01404571s - Jul 29 16:35:39.459: INFO: Pod "liveness-44e0d424-c2db-4164-8020-f3236bdc2374" satisfied condition "not pending" - Jul 29 16:35:39.459: INFO: Started pod liveness-44e0d424-c2db-4164-8020-f3236bdc2374 in namespace container-probe-3284 - STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 16:35:39.459 - Jul 29 16:35:39.465: INFO: Initial restart count of pod liveness-44e0d424-c2db-4164-8020-f3236bdc2374 is 0 - Jul 29 16:35:59.578: INFO: Restart count of pod container-probe-3284/liveness-44e0d424-c2db-4164-8020-f3236bdc2374 is now 1 (20.112665453s elapsed) - STEP: deleting the pod 07/29/23 16:35:59.578 - [AfterEach] [sig-node] Probing container + [It] A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] + test/e2e/network/proxy.go:286 + Aug 24 12:43:08.762: INFO: Creating pod... + Aug 24 12:43:08.781: INFO: Waiting up to 5m0s for pod "agnhost" in namespace "proxy-6133" to be "running" + Aug 24 12:43:08.794: INFO: Pod "agnhost": Phase="Pending", Reason="", readiness=false. Elapsed: 13.631026ms + Aug 24 12:43:10.801: INFO: Pod "agnhost": Phase="Running", Reason="", readiness=true. Elapsed: 2.019921321s + Aug 24 12:43:10.801: INFO: Pod "agnhost" satisfied condition "running" + Aug 24 12:43:10.801: INFO: Creating service... + Aug 24 12:43:10.818: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/DELETE + Aug 24 12:43:10.835: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE + Aug 24 12:43:10.836: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/GET + Aug 24 12:43:10.845: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET + Aug 24 12:43:10.846: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/HEAD + Aug 24 12:43:10.852: INFO: http.Client request:HEAD | StatusCode:200 + Aug 24 12:43:10.853: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/OPTIONS + Aug 24 12:43:10.859: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS + Aug 24 12:43:10.859: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/PATCH + Aug 24 12:43:10.865: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH + Aug 24 12:43:10.865: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/POST + Aug 24 12:43:10.878: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST + Aug 24 12:43:10.879: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/pods/agnhost/proxy/some/path/with/PUT + Aug 24 12:43:10.887: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT + Aug 24 12:43:10.887: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/DELETE + Aug 24 12:43:10.897: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE + Aug 24 12:43:10.897: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/GET + Aug 24 12:43:10.911: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET + Aug 24 
12:43:10.911: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/HEAD + Aug 24 12:43:10.922: INFO: http.Client request:HEAD | StatusCode:200 + Aug 24 12:43:10.922: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/OPTIONS + Aug 24 12:43:10.933: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS + Aug 24 12:43:10.933: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/PATCH + Aug 24 12:43:10.943: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH + Aug 24 12:43:10.943: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/POST + Aug 24 12:43:10.961: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST + Aug 24 12:43:10.961: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-6133/services/test-service/proxy/some/path/with/PUT + Aug 24 12:43:10.976: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT + [AfterEach] version v1 test/e2e/framework/node/init/init.go:32 - Jul 29 16:35:59.601: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Probing container + Aug 24 12:43:10.977: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] version v1 test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] version v1 dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] version v1 tear down framework | framework.go:193 - STEP: Destroying namespace "container-probe-3284" for this suite. 07/29/23 16:35:59.63 + STEP: Destroying namespace "proxy-6133" for this suite. 
08/24/23 12:43:10.985 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] RuntimeClass - should support RuntimeClasses API operations [Conformance] - test/e2e/common/node/runtimeclass.go:189 -[BeforeEach] [sig-node] RuntimeClass +[sig-storage] Secrets + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:79 +[BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:35:59.649 -Jul 29 16:35:59.650: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename runtimeclass 07/29/23 16:35:59.656 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:59.704 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:59.712 -[BeforeEach] [sig-node] RuntimeClass +STEP: Creating a kubernetes client 08/24/23 12:43:11.019 +Aug 24 12:43:11.019: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 12:43:11.022 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:11.088 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:11.094 +[BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 -[It] should support RuntimeClasses API operations [Conformance] - test/e2e/common/node/runtimeclass.go:189 -STEP: getting /apis 07/29/23 16:35:59.718 -STEP: getting /apis/node.k8s.io 07/29/23 16:35:59.722 -STEP: getting /apis/node.k8s.io/v1 07/29/23 16:35:59.723 -STEP: creating 07/29/23 16:35:59.727 -STEP: watching 07/29/23 16:35:59.763 -Jul 29 16:35:59.763: INFO: starting watch -STEP: getting 07/29/23 16:35:59.774 -STEP: listing 07/29/23 16:35:59.778 -STEP: patching 07/29/23 16:35:59.784 -STEP: updating 07/29/23 16:35:59.795 -Jul 29 16:35:59.805: INFO: waiting for watch events with expected annotations -STEP: deleting 07/29/23 16:35:59.806 -STEP: deleting a collection 07/29/23 16:35:59.835 -[AfterEach] [sig-node] RuntimeClass +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:79 +STEP: Creating secret with name secret-test-map-72194b3f-abc6-4e6a-8a7b-894e80b216fc 08/24/23 12:43:11.101 +STEP: Creating a pod to test consume secrets 08/24/23 12:43:11.114 +Aug 24 12:43:11.127: INFO: Waiting up to 5m0s for pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a" in namespace "secrets-1835" to be "Succeeded or Failed" +Aug 24 12:43:11.131: INFO: Pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.240055ms +Aug 24 12:43:13.140: INFO: Pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013185953s +Aug 24 12:43:15.140: INFO: Pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.012592884s +STEP: Saw pod success 08/24/23 12:43:15.14 +Aug 24 12:43:15.140: INFO: Pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a" satisfied condition "Succeeded or Failed" +Aug 24 12:43:15.146: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a container secret-volume-test: +STEP: delete the pod 08/24/23 12:43:15.157 +Aug 24 12:43:15.178: INFO: Waiting for pod pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a to disappear +Aug 24 12:43:15.184: INFO: Pod pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a no longer exists +[AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 16:35:59.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] RuntimeClass +Aug 24 12:43:15.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] RuntimeClass +[DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] RuntimeClass +[DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "runtimeclass-7045" for this suite. 07/29/23 16:35:59.874 +STEP: Destroying namespace "secrets-1835" for this suite. 08/24/23 12:43:15.193 ------------------------------ -• [0.277 seconds] -[sig-node] RuntimeClass -test/e2e/common/node/framework.go:23 - should support RuntimeClasses API operations [Conformance] - test/e2e/common/node/runtimeclass.go:189 +• [4.186 seconds] +[sig-storage] Secrets +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:79 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] RuntimeClass + [BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:35:59.649 - Jul 29 16:35:59.650: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename runtimeclass 07/29/23 16:35:59.656 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:59.704 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:59.712 - [BeforeEach] [sig-node] RuntimeClass + STEP: Creating a kubernetes client 08/24/23 12:43:11.019 + Aug 24 12:43:11.019: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 12:43:11.022 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:11.088 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:11.094 + [BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 - [It] should support RuntimeClasses API operations [Conformance] - test/e2e/common/node/runtimeclass.go:189 - STEP: getting /apis 07/29/23 16:35:59.718 - STEP: getting /apis/node.k8s.io 07/29/23 16:35:59.722 - STEP: getting /apis/node.k8s.io/v1 07/29/23 16:35:59.723 - STEP: creating 07/29/23 16:35:59.727 - STEP: watching 07/29/23 16:35:59.763 - Jul 29 16:35:59.763: INFO: starting watch - STEP: getting 07/29/23 16:35:59.774 - STEP: listing 07/29/23 16:35:59.778 - STEP: patching 07/29/23 16:35:59.784 - STEP: updating 07/29/23 16:35:59.795 - Jul 29 16:35:59.805: INFO: waiting for watch events with expected annotations - STEP: deleting 07/29/23 
16:35:59.806 - STEP: deleting a collection 07/29/23 16:35:59.835 - [AfterEach] [sig-node] RuntimeClass + [It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:79 + STEP: Creating secret with name secret-test-map-72194b3f-abc6-4e6a-8a7b-894e80b216fc 08/24/23 12:43:11.101 + STEP: Creating a pod to test consume secrets 08/24/23 12:43:11.114 + Aug 24 12:43:11.127: INFO: Waiting up to 5m0s for pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a" in namespace "secrets-1835" to be "Succeeded or Failed" + Aug 24 12:43:11.131: INFO: Pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a": Phase="Pending", Reason="", readiness=false. Elapsed: 4.240055ms + Aug 24 12:43:13.140: INFO: Pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013185953s + Aug 24 12:43:15.140: INFO: Pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012592884s + STEP: Saw pod success 08/24/23 12:43:15.14 + Aug 24 12:43:15.140: INFO: Pod "pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a" satisfied condition "Succeeded or Failed" + Aug 24 12:43:15.146: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a container secret-volume-test: + STEP: delete the pod 08/24/23 12:43:15.157 + Aug 24 12:43:15.178: INFO: Waiting for pod pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a to disappear + Aug 24 12:43:15.184: INFO: Pod pod-secrets-2404cde3-8baa-499d-b64e-a90ec651846a no longer exists + [AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 16:35:59.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] RuntimeClass + Aug 24 12:43:15.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "runtimeclass-7045" for this suite. 07/29/23 16:35:59.874 + STEP: Destroying namespace "secrets-1835" for this suite. 
08/24/23 12:43:15.193 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:261 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-apps] ReplicationController + should serve a basic image on each replica with a public image [Conformance] + test/e2e/apps/rc.go:67 +[BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:35:59.935 -Jul 29 16:35:59.935: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:35:59.937 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:59.972 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:59.979 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 12:43:15.213 +Aug 24 12:43:15.214: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replication-controller 08/24/23 12:43:15.216 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:15.242 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:15.247 +[BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:261 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:35:59.984 -Jul 29 16:36:00.002: INFO: Waiting up to 5m0s for pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4" in namespace "projected-1926" to be "Succeeded or Failed" -Jul 29 16:36:00.012: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4": Phase="Pending", Reason="", readiness=false. Elapsed: 9.415486ms -Jul 29 16:36:02.032: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4": Phase="Running", Reason="", readiness=true. Elapsed: 2.029389946s -Jul 29 16:36:04.019: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4": Phase="Running", Reason="", readiness=false. Elapsed: 4.016873613s -Jul 29 16:36:06.028: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.025476547s -STEP: Saw pod success 07/29/23 16:36:06.028 -Jul 29 16:36:06.028: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4" satisfied condition "Succeeded or Failed" -Jul 29 16:36:06.036: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4 container client-container: -STEP: delete the pod 07/29/23 16:36:06.055 -Jul 29 16:36:06.085: INFO: Waiting for pod downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4 to disappear -Jul 29 16:36:06.099: INFO: Pod downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4 no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +[BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 +[It] should serve a basic image on each replica with a public image [Conformance] + test/e2e/apps/rc.go:67 +STEP: Creating replication controller my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a 08/24/23 12:43:15.252 +Aug 24 12:43:15.270: INFO: Pod name my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a: Found 0 pods out of 1 +Aug 24 12:43:20.279: INFO: Pod name my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a: Found 1 pods out of 1 +Aug 24 12:43:20.279: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a" are running +Aug 24 12:43:20.279: INFO: Waiting up to 5m0s for pod "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9" in namespace "replication-controller-1066" to be "running" +Aug 24 12:43:20.286: INFO: Pod "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9": Phase="Running", Reason="", readiness=true. Elapsed: 7.261335ms +Aug 24 12:43:20.287: INFO: Pod "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9" satisfied condition "running" +Aug 24 12:43:20.287: INFO: Pod "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:43:15 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:43:17 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:43:17 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:43:15 +0000 UTC Reason: Message:}]) +Aug 24 12:43:20.287: INFO: Trying to dial the pod +Aug 24 12:43:25.315: INFO: Controller my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a: Got expected result from replica 1 [my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9]: "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 -Jul 29 16:36:06.099: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 12:43:25.315: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 -STEP: Destroying 
namespace "projected-1926" for this suite. 07/29/23 16:36:06.112 +STEP: Destroying namespace "replication-controller-1066" for this suite. 08/24/23 12:43:25.324 ------------------------------ -• [SLOW TEST] [6.191 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:261 +• [SLOW TEST] [10.123 seconds] +[sig-apps] ReplicationController +test/e2e/apps/framework.go:23 + should serve a basic image on each replica with a public image [Conformance] + test/e2e/apps/rc.go:67 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:35:59.935 - Jul 29 16:35:59.935: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:35:59.937 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:35:59.972 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:35:59.979 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 12:43:15.213 + Aug 24 12:43:15.214: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replication-controller 08/24/23 12:43:15.216 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:15.242 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:15.247 + [BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:261 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:35:59.984 - Jul 29 16:36:00.002: INFO: Waiting up to 5m0s for pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4" in namespace "projected-1926" to be "Succeeded or Failed" - Jul 29 16:36:00.012: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4": Phase="Pending", Reason="", readiness=false. Elapsed: 9.415486ms - Jul 29 16:36:02.032: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4": Phase="Running", Reason="", readiness=true. Elapsed: 2.029389946s - Jul 29 16:36:04.019: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4": Phase="Running", Reason="", readiness=false. Elapsed: 4.016873613s - Jul 29 16:36:06.028: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.025476547s - STEP: Saw pod success 07/29/23 16:36:06.028 - Jul 29 16:36:06.028: INFO: Pod "downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4" satisfied condition "Succeeded or Failed" - Jul 29 16:36:06.036: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4 container client-container: - STEP: delete the pod 07/29/23 16:36:06.055 - Jul 29 16:36:06.085: INFO: Waiting for pod downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4 to disappear - Jul 29 16:36:06.099: INFO: Pod downwardapi-volume-987a187c-e7fa-49e6-bd24-4f4415dc69a4 no longer exists - [AfterEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 + [It] should serve a basic image on each replica with a public image [Conformance] + test/e2e/apps/rc.go:67 + STEP: Creating replication controller my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a 08/24/23 12:43:15.252 + Aug 24 12:43:15.270: INFO: Pod name my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a: Found 0 pods out of 1 + Aug 24 12:43:20.279: INFO: Pod name my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a: Found 1 pods out of 1 + Aug 24 12:43:20.279: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a" are running + Aug 24 12:43:20.279: INFO: Waiting up to 5m0s for pod "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9" in namespace "replication-controller-1066" to be "running" + Aug 24 12:43:20.286: INFO: Pod "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9": Phase="Running", Reason="", readiness=true. Elapsed: 7.261335ms + Aug 24 12:43:20.287: INFO: Pod "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9" satisfied condition "running" + Aug 24 12:43:20.287: INFO: Pod "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:43:15 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:43:17 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:43:17 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-08-24 12:43:15 +0000 UTC Reason: Message:}]) + Aug 24 12:43:20.287: INFO: Trying to dial the pod + Aug 24 12:43:25.315: INFO: Controller my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a: Got expected result from replica 1 [my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9]: "my-hostname-basic-9a693b0f-4845-47fa-93ea-e6b3e472034a-7gqw9", 1 of 1 required successes so far + [AfterEach] [sig-apps] ReplicationController + test/e2e/framework/node/init/init.go:32 + Aug 24 12:43:25.315: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicationController + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-apps] ReplicationController + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-apps] ReplicationController + tear down framework | framework.go:193 + STEP: Destroying namespace "replication-controller-1066" for this suite. 
08/24/23 12:43:25.324 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD preserving unknown fields at the schema root [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:194 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:43:25.339 +Aug 24 12:43:25.339: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:43:25.343 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:25.372 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:25.377 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:31 +[It] works for CRD preserving unknown fields at the schema root [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:194 +Aug 24 12:43:25.383: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 08/24/23 12:43:28.879 +Aug 24 12:43:28.880: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 --namespace=crd-publish-openapi-2201 create -f -' +Aug 24 12:43:30.411: INFO: stderr: "" +Aug 24 12:43:30.411: INFO: stdout: "e2e-test-crd-publish-openapi-1541-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" +Aug 24 12:43:30.412: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 --namespace=crd-publish-openapi-2201 delete e2e-test-crd-publish-openapi-1541-crds test-cr' +Aug 24 12:43:30.596: INFO: stderr: "" +Aug 24 12:43:30.596: INFO: stdout: "e2e-test-crd-publish-openapi-1541-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" +Aug 24 12:43:30.597: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 --namespace=crd-publish-openapi-2201 apply -f -' +Aug 24 12:43:31.797: INFO: stderr: "" +Aug 24 12:43:31.797: INFO: stdout: "e2e-test-crd-publish-openapi-1541-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" +Aug 24 12:43:31.798: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 --namespace=crd-publish-openapi-2201 delete e2e-test-crd-publish-openapi-1541-crds test-cr' +Aug 24 12:43:31.979: INFO: stderr: "" +Aug 24 12:43:31.979: INFO: stdout: "e2e-test-crd-publish-openapi-1541-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" +STEP: kubectl explain works to explain CR 08/24/23 12:43:31.979 +Aug 24 12:43:31.980: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 explain e2e-test-crd-publish-openapi-1541-crds' +Aug 24 12:43:32.431: INFO: stderr: "" +Aug 24 12:43:32.431: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1541-crd\nVERSION: crd-publish-openapi-test-unknown-at-root.example.com/v1\n\nDESCRIPTION:\n \n" +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + test/e2e/framework/node/init/init.go:32 +Aug 24 12:43:34.750: INFO: Waiting up to 3m0s for 
all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + tear down framework | framework.go:193 +STEP: Destroying namespace "crd-publish-openapi-2201" for this suite. 08/24/23 12:43:34.772 +------------------------------ +• [SLOW TEST] [9.447 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + works for CRD preserving unknown fields at the schema root [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:194 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:43:25.339 + Aug 24 12:43:25.339: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:43:25.343 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:25.372 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:25.377 + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + test/e2e/framework/metrics/init/init.go:31 + [It] works for CRD preserving unknown fields at the schema root [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:194 + Aug 24 12:43:25.383: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: kubectl validation (kubectl create and apply) allows request with any unknown properties 08/24/23 12:43:28.879 + Aug 24 12:43:28.880: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 --namespace=crd-publish-openapi-2201 create -f -' + Aug 24 12:43:30.411: INFO: stderr: "" + Aug 24 12:43:30.411: INFO: stdout: "e2e-test-crd-publish-openapi-1541-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" + Aug 24 12:43:30.412: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 --namespace=crd-publish-openapi-2201 delete e2e-test-crd-publish-openapi-1541-crds test-cr' + Aug 24 12:43:30.596: INFO: stderr: "" + Aug 24 12:43:30.596: INFO: stdout: "e2e-test-crd-publish-openapi-1541-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" + Aug 24 12:43:30.597: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 --namespace=crd-publish-openapi-2201 apply -f -' + Aug 24 12:43:31.797: INFO: stderr: "" + Aug 24 12:43:31.797: INFO: stdout: "e2e-test-crd-publish-openapi-1541-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" + Aug 24 12:43:31.798: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 --namespace=crd-publish-openapi-2201 delete e2e-test-crd-publish-openapi-1541-crds test-cr' + Aug 24 12:43:31.979: INFO: stderr: "" + Aug 24 12:43:31.979: INFO: stdout: "e2e-test-crd-publish-openapi-1541-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" + STEP: kubectl explain works to explain CR 08/24/23 12:43:31.979 + Aug 24 
12:43:31.980: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-2201 explain e2e-test-crd-publish-openapi-1541-crds' + Aug 24 12:43:32.431: INFO: stderr: "" + Aug 24 12:43:32.431: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-1541-crd\nVERSION: crd-publish-openapi-test-unknown-at-root.example.com/v1\n\nDESCRIPTION:\n \n" + [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:36:06.099: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 12:43:34.750: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "projected-1926" for this suite. 07/29/23 16:36:06.112 + STEP: Destroying namespace "crd-publish-openapi-2201" for this suite. 08/24/23 12:43:34.772 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Variable Expansion - should allow substituting values in a volume subpath [Conformance] - test/e2e/common/node/expansion.go:112 -[BeforeEach] [sig-node] Variable Expansion +[sig-storage] Secrets + optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:205 +[BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:36:06.128 -Jul 29 16:36:06.128: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename var-expansion 07/29/23 16:36:06.131 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:06.173 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:06.199 -[BeforeEach] [sig-node] Variable Expansion +STEP: Creating a kubernetes client 08/24/23 12:43:34.792 +Aug 24 12:43:34.792: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 12:43:34.795 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:34.823 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:34.828 +[BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 -[It] should allow substituting values in a volume subpath [Conformance] - test/e2e/common/node/expansion.go:112 -STEP: Creating a pod to test substitution in volume subpath 07/29/23 16:36:06.208 -Jul 29 16:36:06.225: INFO: Waiting up to 5m0s for pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48" in namespace "var-expansion-1067" to be "Succeeded or Failed" -Jul 29 16:36:06.246: INFO: Pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48": Phase="Pending", Reason="", readiness=false. Elapsed: 20.572161ms -Jul 29 16:36:08.254: INFO: Pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.028552161s -Jul 29 16:36:10.254: INFO: Pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028463617s -STEP: Saw pod success 07/29/23 16:36:10.254 -Jul 29 16:36:10.254: INFO: Pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48" satisfied condition "Succeeded or Failed" -Jul 29 16:36:10.261: INFO: Trying to get logs from node wetuj3nuajog-3 pod var-expansion-e28441b5-a34c-4380-aec6-025d35365f48 container dapi-container: -STEP: delete the pod 07/29/23 16:36:10.275 -Jul 29 16:36:10.294: INFO: Waiting for pod var-expansion-e28441b5-a34c-4380-aec6-025d35365f48 to disappear -Jul 29 16:36:10.301: INFO: Pod var-expansion-e28441b5-a34c-4380-aec6-025d35365f48 no longer exists -[AfterEach] [sig-node] Variable Expansion +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:205 +STEP: Creating secret with name s-test-opt-del-8c4e7b12-6e90-4d8f-9673-5f3c2cdef86c 08/24/23 12:43:34.841 +STEP: Creating secret with name s-test-opt-upd-ecd98a61-7581-4928-be29-0259ac80d28d 08/24/23 12:43:34.849 +STEP: Creating the pod 08/24/23 12:43:34.857 +Aug 24 12:43:34.874: INFO: Waiting up to 5m0s for pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c" in namespace "secrets-9535" to be "running and ready" +Aug 24 12:43:34.882: INFO: Pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c": Phase="Pending", Reason="", readiness=false. Elapsed: 7.888432ms +Aug 24 12:43:34.882: INFO: The phase of Pod pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:43:36.891: INFO: Pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016960497s +Aug 24 12:43:36.891: INFO: The phase of Pod pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:43:38.892: INFO: Pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c": Phase="Running", Reason="", readiness=true. Elapsed: 4.017402697s +Aug 24 12:43:38.892: INFO: The phase of Pod pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c is Running (Ready = true) +Aug 24 12:43:38.892: INFO: Pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c" satisfied condition "running and ready" +STEP: Deleting secret s-test-opt-del-8c4e7b12-6e90-4d8f-9673-5f3c2cdef86c 08/24/23 12:43:38.966 +STEP: Updating secret s-test-opt-upd-ecd98a61-7581-4928-be29-0259ac80d28d 08/24/23 12:43:38.979 +STEP: Creating secret with name s-test-opt-create-40cd2069-7fd7-4eaf-b767-d96ec27efd51 08/24/23 12:43:39.001 +STEP: waiting to observe update in volume 08/24/23 12:43:39.019 +[AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 16:36:10.301: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Variable Expansion +Aug 24 12:45:03.896: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "var-expansion-1067" for this suite. 07/29/23 16:36:10.31 +STEP: Destroying namespace "secrets-9535" for this suite. 
08/24/23 12:45:03.908 ------------------------------ -• [4.194 seconds] -[sig-node] Variable Expansion -test/e2e/common/node/framework.go:23 - should allow substituting values in a volume subpath [Conformance] - test/e2e/common/node/expansion.go:112 +• [SLOW TEST] [89.134 seconds] +[sig-storage] Secrets +test/e2e/common/storage/framework.go:23 + optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:205 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Variable Expansion + [BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:36:06.128 - Jul 29 16:36:06.128: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename var-expansion 07/29/23 16:36:06.131 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:06.173 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:06.199 - [BeforeEach] [sig-node] Variable Expansion + STEP: Creating a kubernetes client 08/24/23 12:43:34.792 + Aug 24 12:43:34.792: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 12:43:34.795 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:43:34.823 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:43:34.828 + [BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 - [It] should allow substituting values in a volume subpath [Conformance] - test/e2e/common/node/expansion.go:112 - STEP: Creating a pod to test substitution in volume subpath 07/29/23 16:36:06.208 - Jul 29 16:36:06.225: INFO: Waiting up to 5m0s for pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48" in namespace "var-expansion-1067" to be "Succeeded or Failed" - Jul 29 16:36:06.246: INFO: Pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48": Phase="Pending", Reason="", readiness=false. Elapsed: 20.572161ms - Jul 29 16:36:08.254: INFO: Pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48": Phase="Pending", Reason="", readiness=false. Elapsed: 2.028552161s - Jul 29 16:36:10.254: INFO: Pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.028463617s - STEP: Saw pod success 07/29/23 16:36:10.254 - Jul 29 16:36:10.254: INFO: Pod "var-expansion-e28441b5-a34c-4380-aec6-025d35365f48" satisfied condition "Succeeded or Failed" - Jul 29 16:36:10.261: INFO: Trying to get logs from node wetuj3nuajog-3 pod var-expansion-e28441b5-a34c-4380-aec6-025d35365f48 container dapi-container: - STEP: delete the pod 07/29/23 16:36:10.275 - Jul 29 16:36:10.294: INFO: Waiting for pod var-expansion-e28441b5-a34c-4380-aec6-025d35365f48 to disappear - Jul 29 16:36:10.301: INFO: Pod var-expansion-e28441b5-a34c-4380-aec6-025d35365f48 no longer exists - [AfterEach] [sig-node] Variable Expansion + [It] optional updates should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:205 + STEP: Creating secret with name s-test-opt-del-8c4e7b12-6e90-4d8f-9673-5f3c2cdef86c 08/24/23 12:43:34.841 + STEP: Creating secret with name s-test-opt-upd-ecd98a61-7581-4928-be29-0259ac80d28d 08/24/23 12:43:34.849 + STEP: Creating the pod 08/24/23 12:43:34.857 + Aug 24 12:43:34.874: INFO: Waiting up to 5m0s for pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c" in namespace "secrets-9535" to be "running and ready" + Aug 24 12:43:34.882: INFO: Pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c": Phase="Pending", Reason="", readiness=false. Elapsed: 7.888432ms + Aug 24 12:43:34.882: INFO: The phase of Pod pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:43:36.891: INFO: Pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016960497s + Aug 24 12:43:36.891: INFO: The phase of Pod pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:43:38.892: INFO: Pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c": Phase="Running", Reason="", readiness=true. Elapsed: 4.017402697s + Aug 24 12:43:38.892: INFO: The phase of Pod pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c is Running (Ready = true) + Aug 24 12:43:38.892: INFO: Pod "pod-secrets-cd6d4c3e-afcf-49dd-8f71-2c031f1e973c" satisfied condition "running and ready" + STEP: Deleting secret s-test-opt-del-8c4e7b12-6e90-4d8f-9673-5f3c2cdef86c 08/24/23 12:43:38.966 + STEP: Updating secret s-test-opt-upd-ecd98a61-7581-4928-be29-0259ac80d28d 08/24/23 12:43:38.979 + STEP: Creating secret with name s-test-opt-create-40cd2069-7fd7-4eaf-b767-d96ec27efd51 08/24/23 12:43:39.001 + STEP: waiting to observe update in volume 08/24/23 12:43:39.019 + [AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 16:36:10.301: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Variable Expansion + Aug 24 12:45:03.896: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "var-expansion-1067" for this suite. 07/29/23 16:36:10.31 + STEP: Destroying namespace "secrets-9535" for this suite. 
08/24/23 12:45:03.908 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition - getting/updating/patching custom resource definition status sub-resource works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:145 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[sig-storage] Projected secret + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:56 +[BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:36:10.326 -Jul 29 16:36:10.326: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 16:36:10.328 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:10.362 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:10.371 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:45:03.931 +Aug 24 12:45:03.931: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:45:03.935 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:03.966 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:03.973 +[BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 -[It] getting/updating/patching custom resource definition status sub-resource works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:145 -Jul 29 16:36:10.377: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:56 +STEP: Creating projection with secret that has name projected-secret-test-e2e8f205-6181-41dc-bb08-9c561cf11163 08/24/23 12:45:03.981 +STEP: Creating a pod to test consume secrets 08/24/23 12:45:03.991 +Aug 24 12:45:04.011: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5" in namespace "projected-9976" to be "Succeeded or Failed" +Aug 24 12:45:04.018: INFO: Pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5": Phase="Pending", Reason="", readiness=false. Elapsed: 7.695446ms +Aug 24 12:45:06.026: INFO: Pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015083904s +Aug 24 12:45:08.026: INFO: Pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01495404s +STEP: Saw pod success 08/24/23 12:45:08.026 +Aug 24 12:45:08.026: INFO: Pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5" satisfied condition "Succeeded or Failed" +Aug 24 12:45:08.032: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5 container projected-secret-volume-test: +STEP: delete the pod 08/24/23 12:45:08.044 +Aug 24 12:45:08.074: INFO: Waiting for pod pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5 to disappear +Aug 24 12:45:08.085: INFO: Pod pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5 no longer exists +[AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 -Jul 29 16:36:10.959: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +Aug 24 12:45:08.085: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 -STEP: Destroying namespace "custom-resource-definition-4244" for this suite. 07/29/23 16:36:10.973 +STEP: Destroying namespace "projected-9976" for this suite. 08/24/23 12:45:08.098 ------------------------------ -• [0.662 seconds] -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - Simple CustomResourceDefinition - test/e2e/apimachinery/custom_resource_definition.go:50 - getting/updating/patching custom resource definition status sub-resource works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:145 +• [4.181 seconds] +[sig-storage] Projected secret +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:56 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:36:10.326 - Jul 29 16:36:10.326: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 16:36:10.328 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:10.362 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:10.371 - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:45:03.931 + Aug 24 12:45:03.931: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:45:03.935 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:03.966 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:03.973 + [BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 - 
[It] getting/updating/patching custom resource definition status sub-resource works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:145 - Jul 29 16:36:10.377: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:56 + STEP: Creating projection with secret that has name projected-secret-test-e2e8f205-6181-41dc-bb08-9c561cf11163 08/24/23 12:45:03.981 + STEP: Creating a pod to test consume secrets 08/24/23 12:45:03.991 + Aug 24 12:45:04.011: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5" in namespace "projected-9976" to be "Succeeded or Failed" + Aug 24 12:45:04.018: INFO: Pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5": Phase="Pending", Reason="", readiness=false. Elapsed: 7.695446ms + Aug 24 12:45:06.026: INFO: Pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015083904s + Aug 24 12:45:08.026: INFO: Pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01495404s + STEP: Saw pod success 08/24/23 12:45:08.026 + Aug 24 12:45:08.026: INFO: Pod "pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5" satisfied condition "Succeeded or Failed" + Aug 24 12:45:08.032: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5 container projected-secret-volume-test: + STEP: delete the pod 08/24/23 12:45:08.044 + Aug 24 12:45:08.074: INFO: Waiting for pod pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5 to disappear + Aug 24 12:45:08.085: INFO: Pod pod-projected-secrets-18d11c13-90a8-47a7-8828-bf96ab7781e5 no longer exists + [AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 - Jul 29 16:36:10.959: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + Aug 24 12:45:08.085: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 - STEP: Destroying namespace "custom-resource-definition-4244" for this suite. 07/29/23 16:36:10.973 + STEP: Destroying namespace "projected-9976" for this suite. 
08/24/23 12:45:08.098 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition - listing custom resource definition objects works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:85 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should validate Statefulset Status endpoints [Conformance] + test/e2e/apps/statefulset.go:977 +[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:36:10.988 -Jul 29 16:36:10.988: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 16:36:10.99 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:11.016 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:11.021 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:45:08.115 +Aug 24 12:45:08.116: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename statefulset 08/24/23 12:45:08.117 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:08.147 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:08.151 +[BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 -[It] listing custom resource definition objects works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:85 -Jul 29 16:36:11.026: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 +STEP: Creating service test in namespace statefulset-219 08/24/23 12:45:08.157 +[It] should validate Statefulset Status endpoints [Conformance] + test/e2e/apps/statefulset.go:977 +STEP: Creating statefulset ss in namespace statefulset-219 08/24/23 12:45:08.186 +Aug 24 12:45:08.216: INFO: Found 0 stateful pods, waiting for 1 +Aug 24 12:45:18.226: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Patch Statefulset to include a label 08/24/23 12:45:18.237 +STEP: Getting /status 08/24/23 12:45:18.254 +Aug 24 12:45:18.265: INFO: StatefulSet ss has Conditions: []v1.StatefulSetCondition(nil) +STEP: updating the StatefulSet Status 08/24/23 12:45:18.265 +Aug 24 12:45:18.285: INFO: updatedStatus.Conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the statefulset status to be updated 08/24/23 12:45:18.286 +Aug 24 12:45:18.291: INFO: Observed &StatefulSet event: ADDED +Aug 24 12:45:18.292: INFO: Found Statefulset ss in namespace statefulset-219 with labels: map[e2e:testing] annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} +Aug 24 12:45:18.292: INFO: Statefulset ss has an updated status +STEP: 
patching the Statefulset Status 08/24/23 12:45:18.292 +Aug 24 12:45:18.292: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} +Aug 24 12:45:18.306: INFO: Patched status conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}} +STEP: watching for the Statefulset status to be patched 08/24/23 12:45:18.306 +Aug 24 12:45:18.310: INFO: Observed &StatefulSet event: ADDED +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 +Aug 24 12:45:18.311: INFO: Deleting all statefulset in ns statefulset-219 +Aug 24 12:45:18.317: INFO: Scaling statefulset ss to 0 +Aug 24 12:45:28.373: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 12:45:28.384: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 -Jul 29 16:36:17.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +Aug 24 12:45:28.429: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 -STEP: Destroying namespace "custom-resource-definition-5142" for this suite. 07/29/23 16:36:17.695 +STEP: Destroying namespace "statefulset-219" for this suite. 
08/24/23 12:45:28.455 ------------------------------ -• [SLOW TEST] [6.720 seconds] -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - Simple CustomResourceDefinition - test/e2e/apimachinery/custom_resource_definition.go:50 - listing custom resource definition objects works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:85 +• [SLOW TEST] [20.356 seconds] +[sig-apps] StatefulSet +test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:103 + should validate Statefulset Status endpoints [Conformance] + test/e2e/apps/statefulset.go:977 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:36:10.988 - Jul 29 16:36:10.988: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 16:36:10.99 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:11.016 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:11.021 - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:45:08.115 + Aug 24 12:45:08.116: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename statefulset 08/24/23 12:45:08.117 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:08.147 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:08.151 + [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 - [It] listing custom resource definition objects works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:85 - Jul 29 16:36:11.026: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 + [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 + STEP: Creating service test in namespace statefulset-219 08/24/23 12:45:08.157 + [It] should validate Statefulset Status endpoints [Conformance] + test/e2e/apps/statefulset.go:977 + STEP: Creating statefulset ss in namespace statefulset-219 08/24/23 12:45:08.186 + Aug 24 12:45:08.216: INFO: Found 0 stateful pods, waiting for 1 + Aug 24 12:45:18.226: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true + STEP: Patch Statefulset to include a label 08/24/23 12:45:18.237 + STEP: Getting /status 08/24/23 12:45:18.254 + Aug 24 12:45:18.265: INFO: StatefulSet ss has Conditions: []v1.StatefulSetCondition(nil) + STEP: updating the StatefulSet Status 08/24/23 12:45:18.265 + Aug 24 12:45:18.285: INFO: updatedStatus.Conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} + STEP: watching for the statefulset status to be updated 08/24/23 12:45:18.286 + Aug 24 12:45:18.291: INFO: Observed &StatefulSet event: ADDED + Aug 24 12:45:18.292: INFO: Found Statefulset ss in namespace statefulset-219 
with labels: map[e2e:testing] annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} + Aug 24 12:45:18.292: INFO: Statefulset ss has an updated status + STEP: patching the Statefulset Status 08/24/23 12:45:18.292 + Aug 24 12:45:18.292: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} + Aug 24 12:45:18.306: INFO: Patched status conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}} + STEP: watching for the Statefulset status to be patched 08/24/23 12:45:18.306 + Aug 24 12:45:18.310: INFO: Observed &StatefulSet event: ADDED + [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 + Aug 24 12:45:18.311: INFO: Deleting all statefulset in ns statefulset-219 + Aug 24 12:45:18.317: INFO: Scaling statefulset ss to 0 + Aug 24 12:45:28.373: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 12:45:28.384: INFO: Deleting statefulset ss + [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 - Jul 29 16:36:17.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + Aug 24 12:45:28.429: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 - STEP: Destroying namespace "custom-resource-definition-5142" for this suite. 07/29/23 16:36:17.695 + STEP: Destroying namespace "statefulset-219" for this suite. 
08/24/23 12:45:28.455 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-apps] Job - should adopt matching orphans and release non-matching pods [Conformance] - test/e2e/apps/job.go:507 -[BeforeEach] [sig-apps] Job +[sig-network] Ingress API + should support creating Ingress API operations [Conformance] + test/e2e/network/ingress.go:552 +[BeforeEach] [sig-network] Ingress API set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:36:17.716 -Jul 29 16:36:17.717: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename job 07/29/23 16:36:17.719 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:17.741 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:17.745 -[BeforeEach] [sig-apps] Job +STEP: Creating a kubernetes client 08/24/23 12:45:28.481 +Aug 24 12:45:28.481: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename ingress 08/24/23 12:45:28.486 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:28.526 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:28.53 +[BeforeEach] [sig-network] Ingress API test/e2e/framework/metrics/init/init.go:31 -[It] should adopt matching orphans and release non-matching pods [Conformance] - test/e2e/apps/job.go:507 -STEP: Creating a job 07/29/23 16:36:17.749 -STEP: Ensuring active pods == parallelism 07/29/23 16:36:17.764 -STEP: Orphaning one of the Job's Pods 07/29/23 16:36:19.778 -Jul 29 16:36:20.319: INFO: Successfully updated pod "adopt-release-kdh2f" -STEP: Checking that the Job readopts the Pod 07/29/23 16:36:20.319 -Jul 29 16:36:20.320: INFO: Waiting up to 15m0s for pod "adopt-release-kdh2f" in namespace "job-5680" to be "adopted" -Jul 29 16:36:20.328: INFO: Pod "adopt-release-kdh2f": Phase="Running", Reason="", readiness=true. Elapsed: 8.12339ms -Jul 29 16:36:22.336: INFO: Pod "adopt-release-kdh2f": Phase="Running", Reason="", readiness=true. Elapsed: 2.015537327s -Jul 29 16:36:22.336: INFO: Pod "adopt-release-kdh2f" satisfied condition "adopted" -STEP: Removing the labels from the Job's Pod 07/29/23 16:36:22.336 -Jul 29 16:36:22.878: INFO: Successfully updated pod "adopt-release-kdh2f" -STEP: Checking that the Job releases the Pod 07/29/23 16:36:22.878 -Jul 29 16:36:22.879: INFO: Waiting up to 15m0s for pod "adopt-release-kdh2f" in namespace "job-5680" to be "released" -Jul 29 16:36:22.885: INFO: Pod "adopt-release-kdh2f": Phase="Running", Reason="", readiness=true. Elapsed: 6.149946ms -Jul 29 16:36:24.894: INFO: Pod "adopt-release-kdh2f": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.015233677s -Jul 29 16:36:24.894: INFO: Pod "adopt-release-kdh2f" satisfied condition "released" -[AfterEach] [sig-apps] Job +[It] should support creating Ingress API operations [Conformance] + test/e2e/network/ingress.go:552 +STEP: getting /apis 08/24/23 12:45:28.535 +STEP: getting /apis/networking.k8s.io 08/24/23 12:45:28.539 +STEP: getting /apis/networking.k8s.iov1 08/24/23 12:45:28.541 +STEP: creating 08/24/23 12:45:28.552 +STEP: getting 08/24/23 12:45:28.582 +STEP: listing 08/24/23 12:45:28.588 +STEP: watching 08/24/23 12:45:28.594 +Aug 24 12:45:28.594: INFO: starting watch +STEP: cluster-wide listing 08/24/23 12:45:28.596 +STEP: cluster-wide watching 08/24/23 12:45:28.601 +Aug 24 12:45:28.602: INFO: starting watch +STEP: patching 08/24/23 12:45:28.603 +STEP: updating 08/24/23 12:45:28.613 +Aug 24 12:45:28.628: INFO: waiting for watch events with expected annotations +Aug 24 12:45:28.629: INFO: saw patched and updated annotations +STEP: patching /status 08/24/23 12:45:28.629 +STEP: updating /status 08/24/23 12:45:28.64 +STEP: get /status 08/24/23 12:45:28.656 +STEP: deleting 08/24/23 12:45:28.662 +STEP: deleting a collection 08/24/23 12:45:28.686 +[AfterEach] [sig-network] Ingress API test/e2e/framework/node/init/init.go:32 -Jul 29 16:36:24.894: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Job +Aug 24 12:45:28.716: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Ingress API test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Job +[DeferCleanup (Each)] [sig-network] Ingress API dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Job +[DeferCleanup (Each)] [sig-network] Ingress API tear down framework | framework.go:193 -STEP: Destroying namespace "job-5680" for this suite. 07/29/23 16:36:24.907 +STEP: Destroying namespace "ingress-1897" for this suite. 
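
The Ingress API test above walks the standard verbs (create, get, list, watch, patch, update, status operations, delete, delete-collection) against `networking.k8s.io/v1`. A hedged client-go sketch of a few of those verbs follows; the namespace, ingress name, and backend service are assumptions for illustration, not the generated names from this run.

```go
// Sketch of create / list / patch / delete-collection against the Ingress API.
package main

import (
	"context"
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // path is illustrative
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	ing := client.NetworkingV1().Ingresses("default") // namespace is an assumption
	ctx := context.TODO()

	// creating
	if _, err := ing.Create(ctx, &networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-ingress"},
		Spec: networkingv1.IngressSpec{
			DefaultBackend: &networkingv1.IngressBackend{
				Service: &networkingv1.IngressServiceBackend{
					Name: "demo-svc",
					Port: networkingv1.ServiceBackendPort{Number: 80},
				},
			},
		},
	}, metav1.CreateOptions{}); err != nil {
		panic(err)
	}

	// listing
	list, err := ing.List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d ingresses\n", len(list.Items))

	// patching (adds an annotation, as the test's patch step does)
	if _, err := ing.Patch(ctx, "demo-ingress", types.MergePatchType,
		[]byte(`{"metadata":{"annotations":{"patched":"true"}}}`),
		metav1.PatchOptions{}); err != nil {
		panic(err)
	}

	// deleting a collection
	if err := ing.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil {
		panic(err)
	}
}
```
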
08/24/23 12:45:28.726 ------------------------------ -• [SLOW TEST] [7.209 seconds] -[sig-apps] Job -test/e2e/apps/framework.go:23 - should adopt matching orphans and release non-matching pods [Conformance] - test/e2e/apps/job.go:507 +• [0.261 seconds] +[sig-network] Ingress API +test/e2e/network/common/framework.go:23 + should support creating Ingress API operations [Conformance] + test/e2e/network/ingress.go:552 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Job + [BeforeEach] [sig-network] Ingress API set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:36:17.716 - Jul 29 16:36:17.717: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename job 07/29/23 16:36:17.719 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:17.741 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:17.745 - [BeforeEach] [sig-apps] Job + STEP: Creating a kubernetes client 08/24/23 12:45:28.481 + Aug 24 12:45:28.481: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename ingress 08/24/23 12:45:28.486 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:28.526 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:28.53 + [BeforeEach] [sig-network] Ingress API test/e2e/framework/metrics/init/init.go:31 - [It] should adopt matching orphans and release non-matching pods [Conformance] - test/e2e/apps/job.go:507 - STEP: Creating a job 07/29/23 16:36:17.749 - STEP: Ensuring active pods == parallelism 07/29/23 16:36:17.764 - STEP: Orphaning one of the Job's Pods 07/29/23 16:36:19.778 - Jul 29 16:36:20.319: INFO: Successfully updated pod "adopt-release-kdh2f" - STEP: Checking that the Job readopts the Pod 07/29/23 16:36:20.319 - Jul 29 16:36:20.320: INFO: Waiting up to 15m0s for pod "adopt-release-kdh2f" in namespace "job-5680" to be "adopted" - Jul 29 16:36:20.328: INFO: Pod "adopt-release-kdh2f": Phase="Running", Reason="", readiness=true. Elapsed: 8.12339ms - Jul 29 16:36:22.336: INFO: Pod "adopt-release-kdh2f": Phase="Running", Reason="", readiness=true. Elapsed: 2.015537327s - Jul 29 16:36:22.336: INFO: Pod "adopt-release-kdh2f" satisfied condition "adopted" - STEP: Removing the labels from the Job's Pod 07/29/23 16:36:22.336 - Jul 29 16:36:22.878: INFO: Successfully updated pod "adopt-release-kdh2f" - STEP: Checking that the Job releases the Pod 07/29/23 16:36:22.878 - Jul 29 16:36:22.879: INFO: Waiting up to 15m0s for pod "adopt-release-kdh2f" in namespace "job-5680" to be "released" - Jul 29 16:36:22.885: INFO: Pod "adopt-release-kdh2f": Phase="Running", Reason="", readiness=true. Elapsed: 6.149946ms - Jul 29 16:36:24.894: INFO: Pod "adopt-release-kdh2f": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.015233677s - Jul 29 16:36:24.894: INFO: Pod "adopt-release-kdh2f" satisfied condition "released" - [AfterEach] [sig-apps] Job + [It] should support creating Ingress API operations [Conformance] + test/e2e/network/ingress.go:552 + STEP: getting /apis 08/24/23 12:45:28.535 + STEP: getting /apis/networking.k8s.io 08/24/23 12:45:28.539 + STEP: getting /apis/networking.k8s.iov1 08/24/23 12:45:28.541 + STEP: creating 08/24/23 12:45:28.552 + STEP: getting 08/24/23 12:45:28.582 + STEP: listing 08/24/23 12:45:28.588 + STEP: watching 08/24/23 12:45:28.594 + Aug 24 12:45:28.594: INFO: starting watch + STEP: cluster-wide listing 08/24/23 12:45:28.596 + STEP: cluster-wide watching 08/24/23 12:45:28.601 + Aug 24 12:45:28.602: INFO: starting watch + STEP: patching 08/24/23 12:45:28.603 + STEP: updating 08/24/23 12:45:28.613 + Aug 24 12:45:28.628: INFO: waiting for watch events with expected annotations + Aug 24 12:45:28.629: INFO: saw patched and updated annotations + STEP: patching /status 08/24/23 12:45:28.629 + STEP: updating /status 08/24/23 12:45:28.64 + STEP: get /status 08/24/23 12:45:28.656 + STEP: deleting 08/24/23 12:45:28.662 + STEP: deleting a collection 08/24/23 12:45:28.686 + [AfterEach] [sig-network] Ingress API test/e2e/framework/node/init/init.go:32 - Jul 29 16:36:24.894: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Job + Aug 24 12:45:28.716: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Ingress API test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Job + [DeferCleanup (Each)] [sig-network] Ingress API dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Job + [DeferCleanup (Each)] [sig-network] Ingress API tear down framework | framework.go:193 - STEP: Destroying namespace "job-5680" for this suite. 07/29/23 16:36:24.907 + STEP: Destroying namespace "ingress-1897" for this suite. 
08/24/23 12:45:28.726 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir wrapper volumes - should not conflict [Conformance] - test/e2e/storage/empty_dir_wrapper.go:67 -[BeforeEach] [sig-storage] EmptyDir wrapper volumes +[sig-api-machinery] Garbage collector + should delete pods created by rc when not orphaning [Conformance] + test/e2e/apimachinery/garbage_collector.go:312 +[BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:36:24.926 -Jul 29 16:36:24.926: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir-wrapper 07/29/23 16:36:24.928 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:24.956 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:24.962 -[BeforeEach] [sig-storage] EmptyDir wrapper volumes +STEP: Creating a kubernetes client 08/24/23 12:45:28.745 +Aug 24 12:45:28.745: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename gc 08/24/23 12:45:28.747 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:28.777 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:28.783 +[BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 -[It] should not conflict [Conformance] - test/e2e/storage/empty_dir_wrapper.go:67 -Jul 29 16:36:24.999: INFO: Waiting up to 5m0s for pod "pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764" in namespace "emptydir-wrapper-7786" to be "running and ready" -Jul 29 16:36:25.005: INFO: Pod "pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764": Phase="Pending", Reason="", readiness=false. Elapsed: 6.149662ms -Jul 29 16:36:25.005: INFO: The phase of Pod pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:36:27.014: INFO: Pod "pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764": Phase="Running", Reason="", readiness=true. Elapsed: 2.015110318s -Jul 29 16:36:27.014: INFO: The phase of Pod pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764 is Running (Ready = true) -Jul 29 16:36:27.014: INFO: Pod "pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764" satisfied condition "running and ready" -STEP: Cleaning up the secret 07/29/23 16:36:27.019 -STEP: Cleaning up the configmap 07/29/23 16:36:27.03 -STEP: Cleaning up the pod 07/29/23 16:36:27.042 -[AfterEach] [sig-storage] EmptyDir wrapper volumes +[It] should delete pods created by rc when not orphaning [Conformance] + test/e2e/apimachinery/garbage_collector.go:312 +STEP: create the rc 08/24/23 12:45:28.787 +STEP: delete the rc 08/24/23 12:45:33.808 +STEP: wait for all pods to be garbage collected 08/24/23 12:45:33.822 +STEP: Gathering metrics 08/24/23 12:45:38.841 +Aug 24 12:45:38.893: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" +Aug 24 12:45:38.900: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 6.412189ms +Aug 24 12:45:38.900: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) +Aug 24 12:45:38.900: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" +Aug 24 12:45:39.025: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 -Jul 29 16:36:27.080: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes +Aug 24 12:45:39.025: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-wrapper-7786" for this suite. 07/29/23 16:36:27.098 +STEP: Destroying namespace "gc-4167" for this suite. 
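
The garbage-collector test above creates pods through a ReplicationController, deletes the RC, and waits for the pods to be collected. A minimal sketch of that cascading delete, under the assumption of an illustrative namespace and RC name; the explicit background propagation policy shown here is one way to request the behavior the test relies on, where the garbage collector removes dependents after the owner is gone.

```go
// Sketch of a cascading delete: remove the owner and let the GC reap dependents.
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // path is illustrative
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Background propagation: the RC is deleted first, then the garbage
	// collector deletes the pods whose ownerReferences point at it.
	policy := metav1.DeletePropagationBackground
	if err := client.CoreV1().ReplicationControllers("default").Delete(
		context.TODO(), "demo-rc", // namespace and name are assumptions
		metav1.DeleteOptions{PropagationPolicy: &policy}); err != nil {
		panic(err)
	}
}
```
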
08/24/23 12:45:39.037 ------------------------------ -• [2.190 seconds] -[sig-storage] EmptyDir wrapper volumes -test/e2e/storage/utils/framework.go:23 - should not conflict [Conformance] - test/e2e/storage/empty_dir_wrapper.go:67 +• [SLOW TEST] [10.305 seconds] +[sig-api-machinery] Garbage collector +test/e2e/apimachinery/framework.go:23 + should delete pods created by rc when not orphaning [Conformance] + test/e2e/apimachinery/garbage_collector.go:312 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir wrapper volumes + [BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:36:24.926 - Jul 29 16:36:24.926: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir-wrapper 07/29/23 16:36:24.928 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:24.956 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:24.962 - [BeforeEach] [sig-storage] EmptyDir wrapper volumes + STEP: Creating a kubernetes client 08/24/23 12:45:28.745 + Aug 24 12:45:28.745: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename gc 08/24/23 12:45:28.747 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:28.777 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:28.783 + [BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 - [It] should not conflict [Conformance] - test/e2e/storage/empty_dir_wrapper.go:67 - Jul 29 16:36:24.999: INFO: Waiting up to 5m0s for pod "pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764" in namespace "emptydir-wrapper-7786" to be "running and ready" - Jul 29 16:36:25.005: INFO: Pod "pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764": Phase="Pending", Reason="", readiness=false. Elapsed: 6.149662ms - Jul 29 16:36:25.005: INFO: The phase of Pod pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:36:27.014: INFO: Pod "pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764": Phase="Running", Reason="", readiness=true. Elapsed: 2.015110318s - Jul 29 16:36:27.014: INFO: The phase of Pod pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764 is Running (Ready = true) - Jul 29 16:36:27.014: INFO: Pod "pod-secrets-6e486e70-6c29-4191-9ab1-611f67655764" satisfied condition "running and ready" - STEP: Cleaning up the secret 07/29/23 16:36:27.019 - STEP: Cleaning up the configmap 07/29/23 16:36:27.03 - STEP: Cleaning up the pod 07/29/23 16:36:27.042 - [AfterEach] [sig-storage] EmptyDir wrapper volumes + [It] should delete pods created by rc when not orphaning [Conformance] + test/e2e/apimachinery/garbage_collector.go:312 + STEP: create the rc 08/24/23 12:45:28.787 + STEP: delete the rc 08/24/23 12:45:33.808 + STEP: wait for all pods to be garbage collected 08/24/23 12:45:33.822 + STEP: Gathering metrics 08/24/23 12:45:38.841 + Aug 24 12:45:38.893: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" + Aug 24 12:45:38.900: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 6.412189ms + Aug 24 12:45:38.900: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) + Aug 24 12:45:38.900: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" + Aug 24 12:45:39.025: INFO: For apiserver_request_total: + For apiserver_request_latency_seconds: + For apiserver_init_events_total: + For garbage_collector_attempt_to_delete_queue_latency: + For garbage_collector_attempt_to_delete_work_duration: + For garbage_collector_attempt_to_orphan_queue_latency: + For garbage_collector_attempt_to_orphan_work_duration: + For garbage_collector_dirty_processing_latency_microseconds: + For garbage_collector_event_processing_latency_microseconds: + For garbage_collector_graph_changes_queue_latency: + For garbage_collector_graph_changes_work_duration: + For garbage_collector_orphan_processing_latency_microseconds: + For namespace_queue_latency: + For namespace_queue_latency_sum: + For namespace_queue_latency_count: + For namespace_retries: + For namespace_work_duration: + For namespace_work_duration_sum: + For namespace_work_duration_count: + For function_duration_seconds: + For errors_total: + For evicted_pods_total: + + [AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 - Jul 29 16:36:27.080: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes + Aug 24 12:45:39.025: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-wrapper-7786" for this suite. 07/29/23 16:36:27.098 + STEP: Destroying namespace "gc-4167" for this suite. 08/24/23 12:45:39.037 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSS ------------------------------ -[sig-node] NoExecuteTaintManager Multiple Pods [Serial] - evicts pods with minTolerationSeconds [Disruptive] [Conformance] - test/e2e/node/taints.go:455 -[BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] +[sig-api-machinery] ResourceQuota + should verify ResourceQuota with best effort scope. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:803 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:36:27.117 -Jul 29 16:36:27.118: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename taint-multiple-pods 07/29/23 16:36:27.119 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:27.147 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:27.152 -[BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] +STEP: Creating a kubernetes client 08/24/23 12:45:39.054 +Aug 24 12:45:39.054: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:45:39.056 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:39.106 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:39.109 +[BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] - test/e2e/node/taints.go:383 -Jul 29 16:36:27.156: INFO: Waiting up to 1m0s for all nodes to be ready -Jul 29 16:37:27.237: INFO: Waiting for terminating namespaces to be deleted... -[It] evicts pods with minTolerationSeconds [Disruptive] [Conformance] - test/e2e/node/taints.go:455 -Jul 29 16:37:27.247: INFO: Starting informer... -STEP: Starting pods... 07/29/23 16:37:27.248 -Jul 29 16:37:27.487: INFO: Pod1 is running on wetuj3nuajog-3. Tainting Node -Jul 29 16:37:27.706: INFO: Waiting up to 5m0s for pod "taint-eviction-b1" in namespace "taint-multiple-pods-4906" to be "running" -Jul 29 16:37:27.721: INFO: Pod "taint-eviction-b1": Phase="Pending", Reason="", readiness=false. Elapsed: 15.048161ms -Jul 29 16:37:29.731: INFO: Pod "taint-eviction-b1": Phase="Running", Reason="", readiness=true. Elapsed: 2.025610293s -Jul 29 16:37:29.732: INFO: Pod "taint-eviction-b1" satisfied condition "running" -Jul 29 16:37:29.732: INFO: Waiting up to 5m0s for pod "taint-eviction-b2" in namespace "taint-multiple-pods-4906" to be "running" -Jul 29 16:37:29.738: INFO: Pod "taint-eviction-b2": Phase="Running", Reason="", readiness=true. Elapsed: 5.989621ms -Jul 29 16:37:29.739: INFO: Pod "taint-eviction-b2" satisfied condition "running" -Jul 29 16:37:29.739: INFO: Pod2 is running on wetuj3nuajog-3. Tainting Node -STEP: Trying to apply a taint on the Node 07/29/23 16:37:29.739 -STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 07/29/23 16:37:29.764 -STEP: Waiting for Pod1 and Pod2 to be deleted 07/29/23 16:37:29.774 -Jul 29 16:37:35.551: INFO: Noticed Pod "taint-eviction-b1" gets evicted. -Jul 29 16:37:55.629: INFO: Noticed Pod "taint-eviction-b2" gets evicted. -STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 07/29/23 16:37:55.665 -[AfterEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] +[It] should verify ResourceQuota with best effort scope. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:803 +STEP: Creating a ResourceQuota with best effort scope 08/24/23 12:45:39.113 +STEP: Ensuring ResourceQuota status is calculated 08/24/23 12:45:39.12 +STEP: Creating a ResourceQuota with not best effort scope 08/24/23 12:45:41.129 +STEP: Ensuring ResourceQuota status is calculated 08/24/23 12:45:41.137 +STEP: Creating a best-effort pod 08/24/23 12:45:43.147 +STEP: Ensuring resource quota with best effort scope captures the pod usage 08/24/23 12:45:43.171 +STEP: Ensuring resource quota with not best effort ignored the pod usage 08/24/23 12:45:45.178 +STEP: Deleting the pod 08/24/23 12:45:47.188 +STEP: Ensuring resource quota status released the pod usage 08/24/23 12:45:47.215 +STEP: Creating a not best-effort pod 08/24/23 12:45:49.225 +STEP: Ensuring resource quota with not best effort scope captures the pod usage 08/24/23 12:45:49.25 +STEP: Ensuring resource quota with best effort scope ignored the pod usage 08/24/23 12:45:51.259 +STEP: Deleting the pod 08/24/23 12:45:53.268 +STEP: Ensuring resource quota status released the pod usage 08/24/23 12:45:53.29 +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:37:55.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] +Aug 24 12:45:55.302: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "taint-multiple-pods-4906" for this suite. 07/29/23 16:37:55.713 +STEP: Destroying namespace "resourcequota-1027" for this suite. 08/24/23 12:45:55.314 ------------------------------ -• [SLOW TEST] [88.611 seconds] -[sig-node] NoExecuteTaintManager Multiple Pods [Serial] -test/e2e/node/framework.go:23 - evicts pods with minTolerationSeconds [Disruptive] [Conformance] - test/e2e/node/taints.go:455 +• [SLOW TEST] [16.277 seconds] +[sig-api-machinery] ResourceQuota +test/e2e/apimachinery/framework.go:23 + should verify ResourceQuota with best effort scope. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:803 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:36:27.117 - Jul 29 16:36:27.118: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename taint-multiple-pods 07/29/23 16:36:27.119 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:36:27.147 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:36:27.152 - [BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + STEP: Creating a kubernetes client 08/24/23 12:45:39.054 + Aug 24 12:45:39.054: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:45:39.056 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:39.106 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:39.109 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] - test/e2e/node/taints.go:383 - Jul 29 16:36:27.156: INFO: Waiting up to 1m0s for all nodes to be ready - Jul 29 16:37:27.237: INFO: Waiting for terminating namespaces to be deleted... - [It] evicts pods with minTolerationSeconds [Disruptive] [Conformance] - test/e2e/node/taints.go:455 - Jul 29 16:37:27.247: INFO: Starting informer... - STEP: Starting pods... 07/29/23 16:37:27.248 - Jul 29 16:37:27.487: INFO: Pod1 is running on wetuj3nuajog-3. Tainting Node - Jul 29 16:37:27.706: INFO: Waiting up to 5m0s for pod "taint-eviction-b1" in namespace "taint-multiple-pods-4906" to be "running" - Jul 29 16:37:27.721: INFO: Pod "taint-eviction-b1": Phase="Pending", Reason="", readiness=false. Elapsed: 15.048161ms - Jul 29 16:37:29.731: INFO: Pod "taint-eviction-b1": Phase="Running", Reason="", readiness=true. Elapsed: 2.025610293s - Jul 29 16:37:29.732: INFO: Pod "taint-eviction-b1" satisfied condition "running" - Jul 29 16:37:29.732: INFO: Waiting up to 5m0s for pod "taint-eviction-b2" in namespace "taint-multiple-pods-4906" to be "running" - Jul 29 16:37:29.738: INFO: Pod "taint-eviction-b2": Phase="Running", Reason="", readiness=true. Elapsed: 5.989621ms - Jul 29 16:37:29.739: INFO: Pod "taint-eviction-b2" satisfied condition "running" - Jul 29 16:37:29.739: INFO: Pod2 is running on wetuj3nuajog-3. Tainting Node - STEP: Trying to apply a taint on the Node 07/29/23 16:37:29.739 - STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 07/29/23 16:37:29.764 - STEP: Waiting for Pod1 and Pod2 to be deleted 07/29/23 16:37:29.774 - Jul 29 16:37:35.551: INFO: Noticed Pod "taint-eviction-b1" gets evicted. - Jul 29 16:37:55.629: INFO: Noticed Pod "taint-eviction-b2" gets evicted. - STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute 07/29/23 16:37:55.665 - [AfterEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + [It] should verify ResourceQuota with best effort scope. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:803 + STEP: Creating a ResourceQuota with best effort scope 08/24/23 12:45:39.113 + STEP: Ensuring ResourceQuota status is calculated 08/24/23 12:45:39.12 + STEP: Creating a ResourceQuota with not best effort scope 08/24/23 12:45:41.129 + STEP: Ensuring ResourceQuota status is calculated 08/24/23 12:45:41.137 + STEP: Creating a best-effort pod 08/24/23 12:45:43.147 + STEP: Ensuring resource quota with best effort scope captures the pod usage 08/24/23 12:45:43.171 + STEP: Ensuring resource quota with not best effort ignored the pod usage 08/24/23 12:45:45.178 + STEP: Deleting the pod 08/24/23 12:45:47.188 + STEP: Ensuring resource quota status released the pod usage 08/24/23 12:45:47.215 + STEP: Creating a not best-effort pod 08/24/23 12:45:49.225 + STEP: Ensuring resource quota with not best effort scope captures the pod usage 08/24/23 12:45:49.25 + STEP: Ensuring resource quota with best effort scope ignored the pod usage 08/24/23 12:45:51.259 + STEP: Deleting the pod 08/24/23 12:45:53.268 + STEP: Ensuring resource quota status released the pod usage 08/24/23 12:45:53.29 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:37:55.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + Aug 24 12:45:55.302: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "taint-multiple-pods-4906" for this suite. 07/29/23 16:37:55.713 + STEP: Destroying namespace "resourcequota-1027" for this suite. 
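
The quota exercised above is scoped to BestEffort, so only pods with no resource requests or limits count against it, which is exactly why the "not best-effort" pod is ignored by it in the log. A hedged sketch of such a ResourceQuota via client-go, with an illustrative namespace, name, and hard pod count:

```go
// Sketch of a ResourceQuota restricted to BestEffort pods.
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // path is illustrative
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	quota := &corev1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort"}, // name is an assumption
		Spec: corev1.ResourceQuotaSpec{
			Hard: corev1.ResourceList{corev1.ResourcePods: resource.MustParse("5")},
			// Only BestEffort pods (no requests/limits) are counted against this quota.
			Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort},
		},
	}
	if _, err := client.CoreV1().ResourceQuotas("default").Create(
		context.TODO(), quota, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```
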
08/24/23 12:45:55.314 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath - runs ReplicaSets to verify preemption running path [Conformance] - test/e2e/scheduling/preemption.go:624 +[sig-scheduling] SchedulerPreemption [Serial] + validates lower priority pod preemption by critical pod [Conformance] + test/e2e/scheduling/preemption.go:224 [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:37:55.73 -Jul 29 16:37:55.730: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sched-preemption 07/29/23 16:37:55.733 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:37:55.764 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:37:55.771 +STEP: Creating a kubernetes client 08/24/23 12:45:55.343 +Aug 24 12:45:55.343: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename sched-preemption 08/24/23 12:45:55.346 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:55.382 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:55.387 [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/scheduling/preemption.go:97 -Jul 29 16:37:55.797: INFO: Waiting up to 1m0s for all nodes to be ready -Jul 29 16:38:55.871: INFO: Waiting for terminating namespaces to be deleted... -[BeforeEach] PreemptionExecutionPath - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:38:55.923 -Jul 29 16:38:55.924: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sched-preemption-path 07/29/23 16:38:55.929 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:38:55.967 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:38:55.973 -[BeforeEach] PreemptionExecutionPath - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] PreemptionExecutionPath - test/e2e/scheduling/preemption.go:576 -STEP: Finding an available node 07/29/23 16:38:55.977 -STEP: Trying to launch a pod without a label to get a node which can launch it. 07/29/23 16:38:55.978 -Jul 29 16:38:55.993: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-preemption-path-2019" to be "running" -Jul 29 16:38:55.999: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 5.776611ms -Jul 29 16:38:58.012: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.019140903s -Jul 29 16:38:58.012: INFO: Pod "without-label" satisfied condition "running" -STEP: Explicitly delete pod here to free the resource it takes. 
07/29/23 16:38:58.017 -Jul 29 16:38:58.036: INFO: found a healthy node: wetuj3nuajog-3 -[It] runs ReplicaSets to verify preemption running path [Conformance] - test/e2e/scheduling/preemption.go:624 -Jul 29 16:39:04.223: INFO: pods created so far: [1 1 1] -Jul 29 16:39:04.223: INFO: length of pods created so far: 3 -Jul 29 16:39:06.252: INFO: pods created so far: [2 2 1] -[AfterEach] PreemptionExecutionPath - test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:13.255: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] PreemptionExecutionPath - test/e2e/scheduling/preemption.go:549 +Aug 24 12:45:55.474: INFO: Waiting up to 1m0s for all nodes to be ready +Aug 24 12:46:55.534: INFO: Waiting for terminating namespaces to be deleted... +[It] validates lower priority pod preemption by critical pod [Conformance] + test/e2e/scheduling/preemption.go:224 +STEP: Create pods that use 4/5 of node resources. 08/24/23 12:46:55.541 +Aug 24 12:46:55.601: INFO: Created pod: pod0-0-sched-preemption-low-priority +Aug 24 12:46:55.618: INFO: Created pod: pod0-1-sched-preemption-medium-priority +Aug 24 12:46:55.697: INFO: Created pod: pod1-0-sched-preemption-medium-priority +Aug 24 12:46:55.727: INFO: Created pod: pod1-1-sched-preemption-medium-priority +Aug 24 12:46:55.791: INFO: Created pod: pod2-0-sched-preemption-medium-priority +Aug 24 12:46:55.806: INFO: Created pod: pod2-1-sched-preemption-medium-priority +STEP: Wait for pods to be scheduled. 08/24/23 12:46:55.806 +Aug 24 12:46:55.806: INFO: Waiting up to 5m0s for pod "pod0-0-sched-preemption-low-priority" in namespace "sched-preemption-9787" to be "running" +Aug 24 12:46:55.827: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 20.951315ms +Aug 24 12:46:57.836: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 2.029626509s +Aug 24 12:46:59.834: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Running", Reason="", readiness=true. Elapsed: 4.028145233s +Aug 24 12:46:59.834: INFO: Pod "pod0-0-sched-preemption-low-priority" satisfied condition "running" +Aug 24 12:46:59.834: INFO: Waiting up to 5m0s for pod "pod0-1-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" +Aug 24 12:46:59.839: INFO: Pod "pod0-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 4.717776ms +Aug 24 12:46:59.839: INFO: Pod "pod0-1-sched-preemption-medium-priority" satisfied condition "running" +Aug 24 12:46:59.839: INFO: Waiting up to 5m0s for pod "pod1-0-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" +Aug 24 12:46:59.845: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.49978ms +Aug 24 12:46:59.845: INFO: Pod "pod1-0-sched-preemption-medium-priority" satisfied condition "running" +Aug 24 12:46:59.845: INFO: Waiting up to 5m0s for pod "pod1-1-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" +Aug 24 12:46:59.851: INFO: Pod "pod1-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. 
Elapsed: 5.592492ms +Aug 24 12:46:59.851: INFO: Pod "pod1-1-sched-preemption-medium-priority" satisfied condition "running" +Aug 24 12:46:59.851: INFO: Waiting up to 5m0s for pod "pod2-0-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" +Aug 24 12:46:59.856: INFO: Pod "pod2-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 4.543606ms +Aug 24 12:46:59.856: INFO: Pod "pod2-0-sched-preemption-medium-priority" satisfied condition "running" +Aug 24 12:46:59.856: INFO: Waiting up to 5m0s for pod "pod2-1-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" +Aug 24 12:46:59.862: INFO: Pod "pod2-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.82329ms +Aug 24 12:46:59.862: INFO: Pod "pod2-1-sched-preemption-medium-priority" satisfied condition "running" +STEP: Run a critical pod that use same resources as that of a lower priority pod 08/24/23 12:46:59.862 +Aug 24 12:46:59.881: INFO: Waiting up to 2m0s for pod "critical-pod" in namespace "kube-system" to be "running" +Aug 24 12:46:59.889: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 7.362922ms +Aug 24 12:47:01.897: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016014162s +Aug 24 12:47:03.900: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4.019215351s +Aug 24 12:47:05.896: INFO: Pod "critical-pod": Phase="Running", Reason="", readiness=true. Elapsed: 6.014640006s +Aug 24 12:47:05.896: INFO: Pod "critical-pod" satisfied condition "running" [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:13.356: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:47:05.969: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/scheduling/preemption.go:84 -[DeferCleanup (Each)] PreemptionExecutionPath - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] PreemptionExecutionPath - dump namespaces | framework.go:196 -[DeferCleanup (Each)] PreemptionExecutionPath - tear down framework | framework.go:193 -STEP: Destroying namespace "sched-preemption-path-2019" for this suite. 07/29/23 16:39:13.483 [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "sched-preemption-2986" for this suite. 07/29/23 16:39:13.498 +STEP: Destroying namespace "sched-preemption-9787" for this suite. 
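
The preemption test fills four fifths of each node's resources with low- and medium-priority pods, then runs a critical pod in kube-system that must displace one of them. A minimal sketch of the critical-pod side, assuming the built-in system-cluster-critical PriorityClass (the log does not name the class explicitly) and an illustrative pause image:

```go
// Sketch of a pod that can preempt lower-priority pods via a critical PriorityClass.
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // path is illustrative
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "critical-pod", Namespace: "kube-system"},
		Spec: corev1.PodSpec{
			// Built-in class with a very high priority value; the scheduler
			// may evict lower-priority pods to make room for this one.
			PriorityClassName: "system-cluster-critical",
			Containers: []corev1.Container{{
				Name:  "pause",
				Image: "registry.k8s.io/pause:3.9", // image choice is an assumption
			}},
		},
	}
	if _, err := client.CoreV1().Pods("kube-system").Create(
		context.TODO(), pod, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```
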
08/24/23 12:47:06.091 ------------------------------ -• [SLOW TEST] [77.785 seconds] +• [SLOW TEST] [70.763 seconds] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/scheduling/framework.go:40 - PreemptionExecutionPath - test/e2e/scheduling/preemption.go:537 - runs ReplicaSets to verify preemption running path [Conformance] - test/e2e/scheduling/preemption.go:624 + validates lower priority pod preemption by critical pod [Conformance] + test/e2e/scheduling/preemption.go:224 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:37:55.73 - Jul 29 16:37:55.730: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sched-preemption 07/29/23 16:37:55.733 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:37:55.764 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:37:55.771 + STEP: Creating a kubernetes client 08/24/23 12:45:55.343 + Aug 24 12:45:55.343: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename sched-preemption 08/24/23 12:45:55.346 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:45:55.382 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:45:55.387 [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/scheduling/preemption.go:97 - Jul 29 16:37:55.797: INFO: Waiting up to 1m0s for all nodes to be ready - Jul 29 16:38:55.871: INFO: Waiting for terminating namespaces to be deleted... - [BeforeEach] PreemptionExecutionPath - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:38:55.923 - Jul 29 16:38:55.924: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sched-preemption-path 07/29/23 16:38:55.929 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:38:55.967 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:38:55.973 - [BeforeEach] PreemptionExecutionPath - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] PreemptionExecutionPath - test/e2e/scheduling/preemption.go:576 - STEP: Finding an available node 07/29/23 16:38:55.977 - STEP: Trying to launch a pod without a label to get a node which can launch it. 07/29/23 16:38:55.978 - Jul 29 16:38:55.993: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-preemption-path-2019" to be "running" - Jul 29 16:38:55.999: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. Elapsed: 5.776611ms - Jul 29 16:38:58.012: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.019140903s - Jul 29 16:38:58.012: INFO: Pod "without-label" satisfied condition "running" - STEP: Explicitly delete pod here to free the resource it takes. 
07/29/23 16:38:58.017 - Jul 29 16:38:58.036: INFO: found a healthy node: wetuj3nuajog-3 - [It] runs ReplicaSets to verify preemption running path [Conformance] - test/e2e/scheduling/preemption.go:624 - Jul 29 16:39:04.223: INFO: pods created so far: [1 1 1] - Jul 29 16:39:04.223: INFO: length of pods created so far: 3 - Jul 29 16:39:06.252: INFO: pods created so far: [2 2 1] - [AfterEach] PreemptionExecutionPath - test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:13.255: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] PreemptionExecutionPath - test/e2e/scheduling/preemption.go:549 + Aug 24 12:45:55.474: INFO: Waiting up to 1m0s for all nodes to be ready + Aug 24 12:46:55.534: INFO: Waiting for terminating namespaces to be deleted... + [It] validates lower priority pod preemption by critical pod [Conformance] + test/e2e/scheduling/preemption.go:224 + STEP: Create pods that use 4/5 of node resources. 08/24/23 12:46:55.541 + Aug 24 12:46:55.601: INFO: Created pod: pod0-0-sched-preemption-low-priority + Aug 24 12:46:55.618: INFO: Created pod: pod0-1-sched-preemption-medium-priority + Aug 24 12:46:55.697: INFO: Created pod: pod1-0-sched-preemption-medium-priority + Aug 24 12:46:55.727: INFO: Created pod: pod1-1-sched-preemption-medium-priority + Aug 24 12:46:55.791: INFO: Created pod: pod2-0-sched-preemption-medium-priority + Aug 24 12:46:55.806: INFO: Created pod: pod2-1-sched-preemption-medium-priority + STEP: Wait for pods to be scheduled. 08/24/23 12:46:55.806 + Aug 24 12:46:55.806: INFO: Waiting up to 5m0s for pod "pod0-0-sched-preemption-low-priority" in namespace "sched-preemption-9787" to be "running" + Aug 24 12:46:55.827: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 20.951315ms + Aug 24 12:46:57.836: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 2.029626509s + Aug 24 12:46:59.834: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Running", Reason="", readiness=true. Elapsed: 4.028145233s + Aug 24 12:46:59.834: INFO: Pod "pod0-0-sched-preemption-low-priority" satisfied condition "running" + Aug 24 12:46:59.834: INFO: Waiting up to 5m0s for pod "pod0-1-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" + Aug 24 12:46:59.839: INFO: Pod "pod0-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 4.717776ms + Aug 24 12:46:59.839: INFO: Pod "pod0-1-sched-preemption-medium-priority" satisfied condition "running" + Aug 24 12:46:59.839: INFO: Waiting up to 5m0s for pod "pod1-0-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" + Aug 24 12:46:59.845: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.49978ms + Aug 24 12:46:59.845: INFO: Pod "pod1-0-sched-preemption-medium-priority" satisfied condition "running" + Aug 24 12:46:59.845: INFO: Waiting up to 5m0s for pod "pod1-1-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" + Aug 24 12:46:59.851: INFO: Pod "pod1-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. 
Elapsed: 5.592492ms + Aug 24 12:46:59.851: INFO: Pod "pod1-1-sched-preemption-medium-priority" satisfied condition "running" + Aug 24 12:46:59.851: INFO: Waiting up to 5m0s for pod "pod2-0-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" + Aug 24 12:46:59.856: INFO: Pod "pod2-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 4.543606ms + Aug 24 12:46:59.856: INFO: Pod "pod2-0-sched-preemption-medium-priority" satisfied condition "running" + Aug 24 12:46:59.856: INFO: Waiting up to 5m0s for pod "pod2-1-sched-preemption-medium-priority" in namespace "sched-preemption-9787" to be "running" + Aug 24 12:46:59.862: INFO: Pod "pod2-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.82329ms + Aug 24 12:46:59.862: INFO: Pod "pod2-1-sched-preemption-medium-priority" satisfied condition "running" + STEP: Run a critical pod that use same resources as that of a lower priority pod 08/24/23 12:46:59.862 + Aug 24 12:46:59.881: INFO: Waiting up to 2m0s for pod "critical-pod" in namespace "kube-system" to be "running" + Aug 24 12:46:59.889: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 7.362922ms + Aug 24 12:47:01.897: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016014162s + Aug 24 12:47:03.900: INFO: Pod "critical-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4.019215351s + Aug 24 12:47:05.896: INFO: Pod "critical-pod": Phase="Running", Reason="", readiness=true. Elapsed: 6.014640006s + Aug 24 12:47:05.896: INFO: Pod "critical-pod" satisfied condition "running" [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:13.356: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:47:05.969: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/scheduling/preemption.go:84 - [DeferCleanup (Each)] PreemptionExecutionPath - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] PreemptionExecutionPath - dump namespaces | framework.go:196 - [DeferCleanup (Each)] PreemptionExecutionPath - tear down framework | framework.go:193 - STEP: Destroying namespace "sched-preemption-path-2019" for this suite. 07/29/23 16:39:13.483 [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "sched-preemption-2986" for this suite. 07/29/23 16:39:13.498 + STEP: Destroying namespace "sched-preemption-9787" for this suite. 
08/24/23 12:47:06.091 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS +SSSSSSSS ------------------------------ [sig-storage] EmptyDir volumes - should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:217 + should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:197 +[BeforeEach] [sig-storage] EmptyDir volumes + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:47:06.113 +Aug 24 12:47:06.113: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 12:47:06.117 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:06.197 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:06.207 [BeforeEach] [sig-storage] EmptyDir volumes + test/e2e/framework/metrics/init/init.go:31 +[It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:197 +STEP: Creating a pod to test emptydir 0644 on node default medium 08/24/23 12:47:06.212 +Aug 24 12:47:06.231: INFO: Waiting up to 5m0s for pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7" in namespace "emptydir-3878" to be "Succeeded or Failed" +Aug 24 12:47:06.240: INFO: Pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7": Phase="Pending", Reason="", readiness=false. Elapsed: 9.406719ms +Aug 24 12:47:08.250: INFO: Pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019130513s +Aug 24 12:47:10.248: INFO: Pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017485106s +STEP: Saw pod success 08/24/23 12:47:10.248 +Aug 24 12:47:10.248: INFO: Pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7" satisfied condition "Succeeded or Failed" +Aug 24 12:47:10.255: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-8cd50240-9e2b-45da-a967-7b68dff3fea7 container test-container: +STEP: delete the pod 08/24/23 12:47:10.285 +Aug 24 12:47:10.318: INFO: Waiting for pod pod-8cd50240-9e2b-45da-a967-7b68dff3fea7 to disappear +Aug 24 12:47:10.328: INFO: Pod pod-8cd50240-9e2b-45da-a967-7b68dff3fea7 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + test/e2e/framework/node/init/init.go:32 +Aug 24 12:47:10.328: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes + tear down framework | framework.go:193 +STEP: Destroying namespace "emptydir-3878" for this suite. 
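
The EmptyDir test above creates a pod with a default-medium emptyDir mounted into a container running as a non-root user, then expects the pod to reach Succeeded. A hedged sketch of that pod shape; the UID, image, command, and names here are assumptions, not the generated values from the run.

```go
// Sketch of a non-root pod exercising an emptyDir volume on the default medium.
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func int64Ptr(i int64) *int64 { return &i }

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // path is illustrative
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "emptydir-demo"},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever, // run once, then report Succeeded/Failed
			Volumes: []corev1.Volume{{
				Name:         "test-volume",
				VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
			}},
			Containers: []corev1.Container{{
				Name:    "test-container",
				Image:   "registry.k8s.io/e2e-test-images/busybox:1.29-4", // assumed image
				Command: []string{"sh", "-c", "ls -l /mnt/test && touch /mnt/test/f"},
				SecurityContext: &corev1.SecurityContext{
					RunAsUser: int64Ptr(1000), // non-root, as the test name requires
				},
				VolumeMounts: []corev1.VolumeMount{{Name: "test-volume", MountPath: "/mnt/test"}},
			}},
		},
	}
	if _, err := client.CoreV1().Pods("default").Create(
		context.TODO(), pod, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```
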
08/24/23 12:47:10.346 +------------------------------ +• [4.250 seconds] +[sig-storage] EmptyDir volumes +test/e2e/common/storage/framework.go:23 + should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:197 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-storage] EmptyDir volumes + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:47:06.113 + Aug 24 12:47:06.113: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 12:47:06.117 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:06.197 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:06.207 + [BeforeEach] [sig-storage] EmptyDir volumes + test/e2e/framework/metrics/init/init.go:31 + [It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:197 + STEP: Creating a pod to test emptydir 0644 on node default medium 08/24/23 12:47:06.212 + Aug 24 12:47:06.231: INFO: Waiting up to 5m0s for pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7" in namespace "emptydir-3878" to be "Succeeded or Failed" + Aug 24 12:47:06.240: INFO: Pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7": Phase="Pending", Reason="", readiness=false. Elapsed: 9.406719ms + Aug 24 12:47:08.250: INFO: Pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019130513s + Aug 24 12:47:10.248: INFO: Pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017485106s + STEP: Saw pod success 08/24/23 12:47:10.248 + Aug 24 12:47:10.248: INFO: Pod "pod-8cd50240-9e2b-45da-a967-7b68dff3fea7" satisfied condition "Succeeded or Failed" + Aug 24 12:47:10.255: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-8cd50240-9e2b-45da-a967-7b68dff3fea7 container test-container: + STEP: delete the pod 08/24/23 12:47:10.285 + Aug 24 12:47:10.318: INFO: Waiting for pod pod-8cd50240-9e2b-45da-a967-7b68dff3fea7 to disappear + Aug 24 12:47:10.328: INFO: Pod pod-8cd50240-9e2b-45da-a967-7b68dff3fea7 no longer exists + [AfterEach] [sig-storage] EmptyDir volumes + test/e2e/framework/node/init/init.go:32 + Aug 24 12:47:10.328: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + tear down framework | framework.go:193 + STEP: Destroying namespace "emptydir-3878" for this suite. 
08/24/23 12:47:10.346 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should complete a service status lifecycle [Conformance] + test/e2e/network/service.go:3428 +[BeforeEach] [sig-network] Services + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:47:10.367 +Aug 24 12:47:10.367: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 12:47:10.372 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:10.407 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:10.416 +[BeforeEach] [sig-network] Services + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should complete a service status lifecycle [Conformance] + test/e2e/network/service.go:3428 +STEP: creating a Service 08/24/23 12:47:10.432 +STEP: watching for the Service to be added 08/24/23 12:47:10.462 +Aug 24 12:47:10.465: INFO: Found Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service-static:true] & ports [{http TCP 80 {0 80 } 0}] +Aug 24 12:47:10.466: INFO: Service test-service-s2vz7 created +STEP: Getting /status 08/24/23 12:47:10.467 +Aug 24 12:47:10.478: INFO: Service test-service-s2vz7 has LoadBalancer: {[]} +STEP: patching the ServiceStatus 08/24/23 12:47:10.479 +STEP: watching for the Service to be patched 08/24/23 12:47:10.498 +Aug 24 12:47:10.502: INFO: observed Service test-service-s2vz7 in namespace services-8062 with annotations: map[] & LoadBalancer: {[]} +Aug 24 12:47:10.502: INFO: Found Service test-service-s2vz7 in namespace services-8062 with annotations: map[patchedstatus:true] & LoadBalancer: {[{203.0.113.1 []}]} +Aug 24 12:47:10.502: INFO: Service test-service-s2vz7 has service status patched +STEP: updating the ServiceStatus 08/24/23 12:47:10.502 +Aug 24 12:47:10.528: INFO: updatedStatus.Conditions: []v1.Condition{v1.Condition{Type:"StatusUpdate", Status:"True", ObservedGeneration:0, LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the Service to be updated 08/24/23 12:47:10.528 +Aug 24 12:47:10.532: INFO: Observed Service test-service-s2vz7 in namespace services-8062 with annotations: map[] & Conditions: {[]} +Aug 24 12:47:10.533: INFO: Observed event: &Service{ObjectMeta:{test-service-s2vz7 services-8062 549e2de0-6d72-45c1-8f74-27d36156db0e 25498 0 2023-08-24 12:47:10 +0000 UTC map[test-service-static:true] map[patchedstatus:true] [] [] [{e2e.test Update v1 2023-08-24 12:47:10 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:test-service-static":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:sessionAffinity":{},"f:type":{}}} } {e2e.test Update v1 2023-08-24 12:47:10 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:patchedstatus":{}}},"f:status":{"f:loadBalancer":{"f:ingress":{}}}} status}]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 80 
},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{},ClusterIP:10.233.38.185,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.233.38.185],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{LoadBalancerIngress{IP:203.0.113.1,Hostname:,Ports:[]PortStatus{},},},},Conditions:[]Condition{},},} +Aug 24 12:47:10.534: INFO: Found Service test-service-s2vz7 in namespace services-8062 with annotations: map[patchedstatus:true] & Conditions: [{StatusUpdate True 0 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +Aug 24 12:47:10.534: INFO: Service test-service-s2vz7 has service status updated +STEP: patching the service 08/24/23 12:47:10.534 +STEP: watching for the Service to be patched 08/24/23 12:47:10.558 +Aug 24 12:47:10.562: INFO: observed Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service-static:true] +Aug 24 12:47:10.563: INFO: observed Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service-static:true] +Aug 24 12:47:10.563: INFO: observed Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service-static:true] +Aug 24 12:47:10.564: INFO: Found Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service:patched test-service-static:true] +Aug 24 12:47:10.564: INFO: Service test-service-s2vz7 patched +STEP: deleting the service 08/24/23 12:47:10.565 +STEP: watching for the Service to be deleted 08/24/23 12:47:10.594 +Aug 24 12:47:10.599: INFO: Observed event: ADDED +Aug 24 12:47:10.599: INFO: Observed event: MODIFIED +Aug 24 12:47:10.599: INFO: Observed event: MODIFIED +Aug 24 12:47:10.599: INFO: Observed event: MODIFIED +Aug 24 12:47:10.599: INFO: Found Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service:patched test-service-static:true] & annotations: map[patchedstatus:true] +Aug 24 12:47:10.600: INFO: Service test-service-s2vz7 deleted +[AfterEach] [sig-network] Services + test/e2e/framework/node/init/init.go:32 +Aug 24 12:47:10.600: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-network] Services + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-network] Services + tear down framework | framework.go:193 +STEP: Destroying namespace "services-8062" for this suite. 
08/24/23 12:47:10.614 +------------------------------ +• [0.265 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should complete a service status lifecycle [Conformance] + test/e2e/network/service.go:3428 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-network] Services + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:47:10.367 + Aug 24 12:47:10.367: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 12:47:10.372 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:10.407 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:10.416 + [BeforeEach] [sig-network] Services + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should complete a service status lifecycle [Conformance] + test/e2e/network/service.go:3428 + STEP: creating a Service 08/24/23 12:47:10.432 + STEP: watching for the Service to be added 08/24/23 12:47:10.462 + Aug 24 12:47:10.465: INFO: Found Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service-static:true] & ports [{http TCP 80 {0 80 } 0}] + Aug 24 12:47:10.466: INFO: Service test-service-s2vz7 created + STEP: Getting /status 08/24/23 12:47:10.467 + Aug 24 12:47:10.478: INFO: Service test-service-s2vz7 has LoadBalancer: {[]} + STEP: patching the ServiceStatus 08/24/23 12:47:10.479 + STEP: watching for the Service to be patched 08/24/23 12:47:10.498 + Aug 24 12:47:10.502: INFO: observed Service test-service-s2vz7 in namespace services-8062 with annotations: map[] & LoadBalancer: {[]} + Aug 24 12:47:10.502: INFO: Found Service test-service-s2vz7 in namespace services-8062 with annotations: map[patchedstatus:true] & LoadBalancer: {[{203.0.113.1 []}]} + Aug 24 12:47:10.502: INFO: Service test-service-s2vz7 has service status patched + STEP: updating the ServiceStatus 08/24/23 12:47:10.502 + Aug 24 12:47:10.528: INFO: updatedStatus.Conditions: []v1.Condition{v1.Condition{Type:"StatusUpdate", Status:"True", ObservedGeneration:0, LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} + STEP: watching for the Service to be updated 08/24/23 12:47:10.528 + Aug 24 12:47:10.532: INFO: Observed Service test-service-s2vz7 in namespace services-8062 with annotations: map[] & Conditions: {[]} + Aug 24 12:47:10.533: INFO: Observed event: &Service{ObjectMeta:{test-service-s2vz7 services-8062 549e2de0-6d72-45c1-8f74-27d36156db0e 25498 0 2023-08-24 12:47:10 +0000 UTC map[test-service-static:true] map[patchedstatus:true] [] [] [{e2e.test Update v1 2023-08-24 12:47:10 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:test-service-static":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:sessionAffinity":{},"f:type":{}}} } {e2e.test Update v1 2023-08-24 12:47:10 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:patchedstatus":{}}},"f:status":{"f:loadBalancer":{"f:ingress":{}}}} status}]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 80 
},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{},ClusterIP:10.233.38.185,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.233.38.185],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{LoadBalancerIngress{IP:203.0.113.1,Hostname:,Ports:[]PortStatus{},},},},Conditions:[]Condition{},},} + Aug 24 12:47:10.534: INFO: Found Service test-service-s2vz7 in namespace services-8062 with annotations: map[patchedstatus:true] & Conditions: [{StatusUpdate True 0 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] + Aug 24 12:47:10.534: INFO: Service test-service-s2vz7 has service status updated + STEP: patching the service 08/24/23 12:47:10.534 + STEP: watching for the Service to be patched 08/24/23 12:47:10.558 + Aug 24 12:47:10.562: INFO: observed Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service-static:true] + Aug 24 12:47:10.563: INFO: observed Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service-static:true] + Aug 24 12:47:10.563: INFO: observed Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service-static:true] + Aug 24 12:47:10.564: INFO: Found Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service:patched test-service-static:true] + Aug 24 12:47:10.564: INFO: Service test-service-s2vz7 patched + STEP: deleting the service 08/24/23 12:47:10.565 + STEP: watching for the Service to be deleted 08/24/23 12:47:10.594 + Aug 24 12:47:10.599: INFO: Observed event: ADDED + Aug 24 12:47:10.599: INFO: Observed event: MODIFIED + Aug 24 12:47:10.599: INFO: Observed event: MODIFIED + Aug 24 12:47:10.599: INFO: Observed event: MODIFIED + Aug 24 12:47:10.599: INFO: Found Service test-service-s2vz7 in namespace services-8062 with labels: map[test-service:patched test-service-static:true] & annotations: map[patchedstatus:true] + Aug 24 12:47:10.600: INFO: Service test-service-s2vz7 deleted + [AfterEach] [sig-network] Services + test/e2e/framework/node/init/init.go:32 + Aug 24 12:47:10.600: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-network] Services + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-network] Services + tear down framework | framework.go:193 + STEP: Destroying namespace "services-8062" for this suite. 
08/24/23 12:47:10.614 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-instrumentation] Events + should manage the lifecycle of an event [Conformance] + test/e2e/instrumentation/core_events.go:57 +[BeforeEach] [sig-instrumentation] Events set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:39:13.518 -Jul 29 16:39:13.519: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 16:39:13.521 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:13.555 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:13.561 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 12:47:10.636 +Aug 24 12:47:10.636: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename events 08/24/23 12:47:10.639 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:10.673 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:10.682 +[BeforeEach] [sig-instrumentation] Events test/e2e/framework/metrics/init/init.go:31 -[It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:217 -STEP: Creating a pod to test emptydir 0777 on node default medium 07/29/23 16:39:13.566 -Jul 29 16:39:13.585: INFO: Waiting up to 5m0s for pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9" in namespace "emptydir-9716" to be "Succeeded or Failed" -Jul 29 16:39:13.596: INFO: Pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9": Phase="Pending", Reason="", readiness=false. Elapsed: 10.928419ms -Jul 29 16:39:15.614: INFO: Pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.029294588s -Jul 29 16:39:17.606: INFO: Pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.020844116s -STEP: Saw pod success 07/29/23 16:39:17.606 -Jul 29 16:39:17.606: INFO: Pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9" satisfied condition "Succeeded or Failed" -Jul 29 16:39:17.613: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9 container test-container: -STEP: delete the pod 07/29/23 16:39:17.654 -Jul 29 16:39:17.676: INFO: Waiting for pod pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9 to disappear -Jul 29 16:39:17.688: INFO: Pod pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[It] should manage the lifecycle of an event [Conformance] + test/e2e/instrumentation/core_events.go:57 +STEP: creating a test event 08/24/23 12:47:10.692 +STEP: listing all events in all namespaces 08/24/23 12:47:10.704 +STEP: patching the test event 08/24/23 12:47:10.717 +STEP: fetching the test event 08/24/23 12:47:10.731 +STEP: updating the test event 08/24/23 12:47:10.737 +STEP: getting the test event 08/24/23 12:47:10.757 +STEP: deleting the test event 08/24/23 12:47:10.764 +STEP: listing all events in all namespaces 08/24/23 12:47:10.783 +[AfterEach] [sig-instrumentation] Events test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:17.689: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +Aug 24 12:47:10.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-instrumentation] Events test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-instrumentation] Events dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-instrumentation] Events tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-9716" for this suite. 07/29/23 16:39:17.702 +STEP: Destroying namespace "events-8265" for this suite. 
08/24/23 12:47:10.801 ------------------------------ -• [4.201 seconds] -[sig-storage] EmptyDir volumes -test/e2e/common/storage/framework.go:23 - should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:217 +• [0.190 seconds] +[sig-instrumentation] Events +test/e2e/instrumentation/common/framework.go:23 + should manage the lifecycle of an event [Conformance] + test/e2e/instrumentation/core_events.go:57 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-instrumentation] Events set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:39:13.518 - Jul 29 16:39:13.519: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 16:39:13.521 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:13.555 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:13.561 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 12:47:10.636 + Aug 24 12:47:10.636: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename events 08/24/23 12:47:10.639 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:10.673 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:10.682 + [BeforeEach] [sig-instrumentation] Events test/e2e/framework/metrics/init/init.go:31 - [It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:217 - STEP: Creating a pod to test emptydir 0777 on node default medium 07/29/23 16:39:13.566 - Jul 29 16:39:13.585: INFO: Waiting up to 5m0s for pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9" in namespace "emptydir-9716" to be "Succeeded or Failed" - Jul 29 16:39:13.596: INFO: Pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9": Phase="Pending", Reason="", readiness=false. Elapsed: 10.928419ms - Jul 29 16:39:15.614: INFO: Pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.029294588s - Jul 29 16:39:17.606: INFO: Pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.020844116s - STEP: Saw pod success 07/29/23 16:39:17.606 - Jul 29 16:39:17.606: INFO: Pod "pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9" satisfied condition "Succeeded or Failed" - Jul 29 16:39:17.613: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9 container test-container: - STEP: delete the pod 07/29/23 16:39:17.654 - Jul 29 16:39:17.676: INFO: Waiting for pod pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9 to disappear - Jul 29 16:39:17.688: INFO: Pod pod-2d9ef224-d48d-4db1-a695-00a13f7a31e9 no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [It] should manage the lifecycle of an event [Conformance] + test/e2e/instrumentation/core_events.go:57 + STEP: creating a test event 08/24/23 12:47:10.692 + STEP: listing all events in all namespaces 08/24/23 12:47:10.704 + STEP: patching the test event 08/24/23 12:47:10.717 + STEP: fetching the test event 08/24/23 12:47:10.731 + STEP: updating the test event 08/24/23 12:47:10.737 + STEP: getting the test event 08/24/23 12:47:10.757 + STEP: deleting the test event 08/24/23 12:47:10.764 + STEP: listing all events in all namespaces 08/24/23 12:47:10.783 + [AfterEach] [sig-instrumentation] Events test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:17.689: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 12:47:10.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-instrumentation] Events test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-instrumentation] Events dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-instrumentation] Events tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-9716" for this suite. 07/29/23 16:39:17.702 + STEP: Destroying namespace "events-8265" for this suite. 
08/24/23 12:47:10.801 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Ephemeral Containers [NodeConformance] - will start an ephemeral container in an existing pod [Conformance] - test/e2e/common/node/ephemeral_containers.go:45 -[BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] +[sig-apps] Daemon set [Serial] + should verify changes to a daemon set status [Conformance] + test/e2e/apps/daemon_set.go:873 +[BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:39:17.728 -Jul 29 16:39:17.728: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename ephemeral-containers-test 07/29/23 16:39:17.73 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:17.761 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:17.768 -[BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] +STEP: Creating a kubernetes client 08/24/23 12:47:10.834 +Aug 24 12:47:10.835: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename daemonsets 08/24/23 12:47:10.841 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:10.879 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:10.887 +[BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] - test/e2e/common/node/ephemeral_containers.go:38 -[It] will start an ephemeral container in an existing pod [Conformance] - test/e2e/common/node/ephemeral_containers.go:45 -STEP: creating a target pod 07/29/23 16:39:17.775 -Jul 29 16:39:17.795: INFO: Waiting up to 5m0s for pod "ephemeral-containers-target-pod" in namespace "ephemeral-containers-test-8511" to be "running and ready" -Jul 29 16:39:17.807: INFO: Pod "ephemeral-containers-target-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 11.156281ms -Jul 29 16:39:17.807: INFO: The phase of Pod ephemeral-containers-target-pod is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:39:19.815: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.018971431s -Jul 29 16:39:19.815: INFO: The phase of Pod ephemeral-containers-target-pod is Running (Ready = true) -Jul 29 16:39:19.815: INFO: Pod "ephemeral-containers-target-pod" satisfied condition "running and ready" -STEP: adding an ephemeral container 07/29/23 16:39:19.819 -Jul 29 16:39:19.852: INFO: Waiting up to 1m0s for pod "ephemeral-containers-target-pod" in namespace "ephemeral-containers-test-8511" to be "container debugger running" -Jul 29 16:39:19.869: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 17.007438ms -Jul 29 16:39:21.881: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.029520975s -Jul 29 16:39:23.917: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.064776749s -Jul 29 16:39:23.917: INFO: Pod "ephemeral-containers-target-pod" satisfied condition "container debugger running" -STEP: checking pod container endpoints 07/29/23 16:39:23.917 -Jul 29 16:39:23.918: INFO: ExecWithOptions {Command:[/bin/echo marco] Namespace:ephemeral-containers-test-8511 PodName:ephemeral-containers-target-pod ContainerName:debugger Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:23.918: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:23.920: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:23.920: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/ephemeral-containers-test-8511/pods/ephemeral-containers-target-pod/exec?command=%2Fbin%2Fecho&command=marco&container=debugger&container=debugger&stderr=true&stdout=true) -Jul 29 16:39:24.041: INFO: Exec stderr: "" -[AfterEach] [sig-node] Ephemeral Containers [NodeConformance] +[BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 +[It] should verify changes to a daemon set status [Conformance] + test/e2e/apps/daemon_set.go:873 +STEP: Creating simple DaemonSet "daemon-set" 08/24/23 12:47:10.961 +STEP: Check that daemon pods launch on every node of the cluster. 08/24/23 12:47:10.971 +Aug 24 12:47:10.990: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:47:10.990: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:47:12.099: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:47:12.099: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:47:13.027: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 +Aug 24 12:47:13.028: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:47:14.013: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 +Aug 24 12:47:14.013: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set +STEP: Getting /status 08/24/23 12:47:14.022 +Aug 24 12:47:14.032: INFO: Daemon Set daemon-set has Conditions: [] +STEP: updating the DaemonSet Status 08/24/23 12:47:14.032 +Aug 24 12:47:14.052: INFO: updatedStatus.Conditions: []v1.DaemonSetCondition{v1.DaemonSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the daemon set status to be updated 08/24/23 12:47:14.052 +Aug 24 12:47:14.057: INFO: Observed &DaemonSet event: ADDED +Aug 24 12:47:14.058: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.059: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.059: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.060: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.061: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.061: INFO: Found daemon set daemon-set in namespace daemonsets-4892 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +Aug 24 12:47:14.061: INFO: Daemon set daemon-set has an updated status +STEP: patching the DaemonSet Status 08/24/23 12:47:14.062 +STEP: watching for the daemon set status to be patched 08/24/23 12:47:14.077 +Aug 24 12:47:14.081: INFO: Observed &DaemonSet event: ADDED +Aug 24 12:47:14.081: INFO: Observed &DaemonSet 
event: MODIFIED +Aug 24 12:47:14.082: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.082: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.083: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.083: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.084: INFO: Observed daemon set daemon-set in namespace daemonsets-4892 with annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +Aug 24 12:47:14.084: INFO: Observed &DaemonSet event: MODIFIED +Aug 24 12:47:14.084: INFO: Found daemon set daemon-set in namespace daemonsets-4892 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusPatched True 0001-01-01 00:00:00 +0000 UTC }] +Aug 24 12:47:14.084: INFO: Daemon set daemon-set has a patched status +[AfterEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:122 +STEP: Deleting DaemonSet "daemon-set" 08/24/23 12:47:14.095 +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-4892, will wait for the garbage collector to delete the pods 08/24/23 12:47:14.095 +Aug 24 12:47:14.167: INFO: Deleting DaemonSet.extensions daemon-set took: 16.497703ms +Aug 24 12:47:14.269: INFO: Terminating DaemonSet.extensions daemon-set pods took: 101.232696ms +Aug 24 12:47:16.375: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:47:16.375: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set +Aug 24 12:47:16.380: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"25654"},"items":null} + +Aug 24 12:47:16.389: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"25654"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:24.056: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] +Aug 24 12:47:16.439: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "ephemeral-containers-test-8511" for this suite. 07/29/23 16:39:24.066 +STEP: Destroying namespace "daemonsets-4892" for this suite. 
08/24/23 12:47:16.46 ------------------------------ -• [SLOW TEST] [6.358 seconds] -[sig-node] Ephemeral Containers [NodeConformance] -test/e2e/common/node/framework.go:23 - will start an ephemeral container in an existing pod [Conformance] - test/e2e/common/node/ephemeral_containers.go:45 +• [SLOW TEST] [5.639 seconds] +[sig-apps] Daemon set [Serial] +test/e2e/apps/framework.go:23 + should verify changes to a daemon set status [Conformance] + test/e2e/apps/daemon_set.go:873 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] + [BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:39:17.728 - Jul 29 16:39:17.728: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename ephemeral-containers-test 07/29/23 16:39:17.73 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:17.761 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:17.768 - [BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] + STEP: Creating a kubernetes client 08/24/23 12:47:10.834 + Aug 24 12:47:10.835: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename daemonsets 08/24/23 12:47:10.841 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:10.879 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:10.887 + [BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] - test/e2e/common/node/ephemeral_containers.go:38 - [It] will start an ephemeral container in an existing pod [Conformance] - test/e2e/common/node/ephemeral_containers.go:45 - STEP: creating a target pod 07/29/23 16:39:17.775 - Jul 29 16:39:17.795: INFO: Waiting up to 5m0s for pod "ephemeral-containers-target-pod" in namespace "ephemeral-containers-test-8511" to be "running and ready" - Jul 29 16:39:17.807: INFO: Pod "ephemeral-containers-target-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 11.156281ms - Jul 29 16:39:17.807: INFO: The phase of Pod ephemeral-containers-target-pod is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:39:19.815: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.018971431s - Jul 29 16:39:19.815: INFO: The phase of Pod ephemeral-containers-target-pod is Running (Ready = true) - Jul 29 16:39:19.815: INFO: Pod "ephemeral-containers-target-pod" satisfied condition "running and ready" - STEP: adding an ephemeral container 07/29/23 16:39:19.819 - Jul 29 16:39:19.852: INFO: Waiting up to 1m0s for pod "ephemeral-containers-target-pod" in namespace "ephemeral-containers-test-8511" to be "container debugger running" - Jul 29 16:39:19.869: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 17.007438ms - Jul 29 16:39:21.881: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.029520975s - Jul 29 16:39:23.917: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.064776749s - Jul 29 16:39:23.917: INFO: Pod "ephemeral-containers-target-pod" satisfied condition "container debugger running" - STEP: checking pod container endpoints 07/29/23 16:39:23.917 - Jul 29 16:39:23.918: INFO: ExecWithOptions {Command:[/bin/echo marco] Namespace:ephemeral-containers-test-8511 PodName:ephemeral-containers-target-pod ContainerName:debugger Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:23.918: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:23.920: INFO: ExecWithOptions: Clientset creation - Jul 29 16:39:23.920: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/ephemeral-containers-test-8511/pods/ephemeral-containers-target-pod/exec?command=%2Fbin%2Fecho&command=marco&container=debugger&container=debugger&stderr=true&stdout=true) - Jul 29 16:39:24.041: INFO: Exec stderr: "" - [AfterEach] [sig-node] Ephemeral Containers [NodeConformance] + [BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 + [It] should verify changes to a daemon set status [Conformance] + test/e2e/apps/daemon_set.go:873 + STEP: Creating simple DaemonSet "daemon-set" 08/24/23 12:47:10.961 + STEP: Check that daemon pods launch on every node of the cluster. 08/24/23 12:47:10.971 + Aug 24 12:47:10.990: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:47:10.990: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:47:12.099: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:47:12.099: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:47:13.027: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 + Aug 24 12:47:13.028: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 12:47:14.013: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 + Aug 24 12:47:14.013: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set + STEP: Getting /status 08/24/23 12:47:14.022 + Aug 24 12:47:14.032: INFO: Daemon Set daemon-set has Conditions: [] + STEP: updating the DaemonSet Status 08/24/23 12:47:14.032 + Aug 24 12:47:14.052: INFO: updatedStatus.Conditions: []v1.DaemonSetCondition{v1.DaemonSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} + STEP: watching for the daemon set status to be updated 08/24/23 12:47:14.052 + Aug 24 12:47:14.057: INFO: Observed &DaemonSet event: ADDED + Aug 24 12:47:14.058: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.059: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.059: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.060: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.061: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.061: INFO: Found daemon set daemon-set in namespace daemonsets-4892 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] + Aug 24 12:47:14.061: INFO: Daemon set daemon-set has an updated status + STEP: patching the DaemonSet Status 08/24/23 12:47:14.062 + STEP: watching for the daemon set status to be patched 08/24/23 12:47:14.077 + Aug 24 12:47:14.081: INFO: Observed &DaemonSet event: ADDED + Aug 24 
12:47:14.081: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.082: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.082: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.083: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.083: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.084: INFO: Observed daemon set daemon-set in namespace daemonsets-4892 with annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] + Aug 24 12:47:14.084: INFO: Observed &DaemonSet event: MODIFIED + Aug 24 12:47:14.084: INFO: Found daemon set daemon-set in namespace daemonsets-4892 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusPatched True 0001-01-01 00:00:00 +0000 UTC }] + Aug 24 12:47:14.084: INFO: Daemon set daemon-set has a patched status + [AfterEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:122 + STEP: Deleting DaemonSet "daemon-set" 08/24/23 12:47:14.095 + STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-4892, will wait for the garbage collector to delete the pods 08/24/23 12:47:14.095 + Aug 24 12:47:14.167: INFO: Deleting DaemonSet.extensions daemon-set took: 16.497703ms + Aug 24 12:47:14.269: INFO: Terminating DaemonSet.extensions daemon-set pods took: 101.232696ms + Aug 24 12:47:16.375: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 12:47:16.375: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set + Aug 24 12:47:16.380: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"25654"},"items":null} + + Aug 24 12:47:16.389: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"25654"},"items":null} + + [AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:24.056: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] + Aug 24 12:47:16.439: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "ephemeral-containers-test-8511" for this suite. 07/29/23 16:39:24.066 + STEP: Destroying namespace "daemonsets-4892" for this suite. 08/24/23 12:47:16.46 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a secret. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:160 -[BeforeEach] [sig-api-machinery] ResourceQuota +[sig-network] Services + should be able to change the type from ExternalName to NodePort [Conformance] + test/e2e/network/service.go:1477 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:39:24.091 -Jul 29 16:39:24.092: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 16:39:24.094 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:24.178 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:24.19 -[BeforeEach] [sig-api-machinery] ResourceQuota +STEP: Creating a kubernetes client 08/24/23 12:47:16.477 +Aug 24 12:47:16.477: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 12:47:16.482 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:16.525 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:16.535 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[It] should create a ResourceQuota and capture the life of a secret. [Conformance] - test/e2e/apimachinery/resource_quota.go:160 -STEP: Discovering how many secrets are in namespace by default 07/29/23 16:39:24.196 -STEP: Counting existing ResourceQuota 07/29/23 16:39:29.203 -STEP: Creating a ResourceQuota 07/29/23 16:39:34.219 -STEP: Ensuring resource quota status is calculated 07/29/23 16:39:34.236 -STEP: Creating a Secret 07/29/23 16:39:36.245 -STEP: Ensuring resource quota status captures secret creation 07/29/23 16:39:36.284 -STEP: Deleting a secret 07/29/23 16:39:38.295 -STEP: Ensuring resource quota status released usage 07/29/23 16:39:38.315 -[AfterEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should be able to change the type from ExternalName to NodePort [Conformance] + test/e2e/network/service.go:1477 +STEP: creating a service externalname-service with the type=ExternalName in namespace services-7751 08/24/23 12:47:16.545 +STEP: changing the ExternalName service to type=NodePort 08/24/23 12:47:16.568 +STEP: creating replication controller externalname-service in namespace services-7751 08/24/23 12:47:16.647 +I0824 12:47:16.680548 14 runners.go:193] Created replication controller with name: externalname-service, namespace: services-7751, replica count: 2 +I0824 12:47:19.732596 14 runners.go:193] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Aug 24 12:47:19.732: INFO: Creating new exec pod +Aug 24 12:47:19.744: INFO: Waiting up to 5m0s for pod "execpodc77gx" in namespace "services-7751" to be "running" +Aug 24 12:47:19.750: INFO: Pod "execpodc77gx": Phase="Pending", Reason="", readiness=false. Elapsed: 6.170672ms +Aug 24 12:47:21.760: INFO: Pod "execpodc77gx": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01645538s +Aug 24 12:47:21.760: INFO: Pod "execpodc77gx" satisfied condition "running" +Aug 24 12:47:22.773: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7751 exec execpodc77gx -- /bin/sh -x -c nc -v -z -w 2 externalname-service 80' +Aug 24 12:47:23.093: INFO: stderr: "+ nc -v -z -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" +Aug 24 12:47:23.093: INFO: stdout: "" +Aug 24 12:47:23.094: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7751 exec execpodc77gx -- /bin/sh -x -c nc -v -z -w 2 10.233.25.114 80' +Aug 24 12:47:23.331: INFO: stderr: "+ nc -v -z -w 2 10.233.25.114 80\nConnection to 10.233.25.114 80 port [tcp/http] succeeded!\n" +Aug 24 12:47:23.332: INFO: stdout: "" +Aug 24 12:47:23.332: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7751 exec execpodc77gx -- /bin/sh -x -c nc -v -z -w 2 192.168.121.130 31564' +Aug 24 12:47:23.631: INFO: stderr: "+ nc -v -z -w 2 192.168.121.130 31564\nConnection to 192.168.121.130 31564 port [tcp/*] succeeded!\n" +Aug 24 12:47:23.631: INFO: stdout: "" +Aug 24 12:47:23.631: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7751 exec execpodc77gx -- /bin/sh -x -c nc -v -z -w 2 192.168.121.111 31564' +Aug 24 12:47:23.923: INFO: stderr: "+ nc -v -z -w 2 192.168.121.111 31564\nConnection to 192.168.121.111 31564 port [tcp/*] succeeded!\n" +Aug 24 12:47:23.923: INFO: stdout: "" +Aug 24 12:47:23.923: INFO: Cleaning up the ExternalName to NodePort test service +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:40.324: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 12:47:23.987: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-828" for this suite. 07/29/23 16:39:40.334 +STEP: Destroying namespace "services-7751" for this suite. 08/24/23 12:47:24.002 ------------------------------ -• [SLOW TEST] [16.254 seconds] -[sig-api-machinery] ResourceQuota -test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a secret. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:160 +• [SLOW TEST] [7.547 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should be able to change the type from ExternalName to NodePort [Conformance] + test/e2e/network/service.go:1477 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:39:24.091 - Jul 29 16:39:24.092: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 16:39:24.094 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:24.178 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:24.19 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 12:47:16.477 + Aug 24 12:47:16.477: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 12:47:16.482 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:16.525 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:16.535 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [It] should create a ResourceQuota and capture the life of a secret. [Conformance] - test/e2e/apimachinery/resource_quota.go:160 - STEP: Discovering how many secrets are in namespace by default 07/29/23 16:39:24.196 - STEP: Counting existing ResourceQuota 07/29/23 16:39:29.203 - STEP: Creating a ResourceQuota 07/29/23 16:39:34.219 - STEP: Ensuring resource quota status is calculated 07/29/23 16:39:34.236 - STEP: Creating a Secret 07/29/23 16:39:36.245 - STEP: Ensuring resource quota status captures secret creation 07/29/23 16:39:36.284 - STEP: Deleting a secret 07/29/23 16:39:38.295 - STEP: Ensuring resource quota status released usage 07/29/23 16:39:38.315 - [AfterEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should be able to change the type from ExternalName to NodePort [Conformance] + test/e2e/network/service.go:1477 + STEP: creating a service externalname-service with the type=ExternalName in namespace services-7751 08/24/23 12:47:16.545 + STEP: changing the ExternalName service to type=NodePort 08/24/23 12:47:16.568 + STEP: creating replication controller externalname-service in namespace services-7751 08/24/23 12:47:16.647 + I0824 12:47:16.680548 14 runners.go:193] Created replication controller with name: externalname-service, namespace: services-7751, replica count: 2 + I0824 12:47:19.732596 14 runners.go:193] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + Aug 24 12:47:19.732: INFO: Creating new exec pod + Aug 24 12:47:19.744: INFO: Waiting up to 5m0s for pod "execpodc77gx" in namespace "services-7751" to be "running" + Aug 24 12:47:19.750: INFO: Pod "execpodc77gx": Phase="Pending", Reason="", readiness=false. Elapsed: 6.170672ms + Aug 24 12:47:21.760: INFO: Pod "execpodc77gx": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01645538s + Aug 24 12:47:21.760: INFO: Pod "execpodc77gx" satisfied condition "running" + Aug 24 12:47:22.773: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7751 exec execpodc77gx -- /bin/sh -x -c nc -v -z -w 2 externalname-service 80' + Aug 24 12:47:23.093: INFO: stderr: "+ nc -v -z -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" + Aug 24 12:47:23.093: INFO: stdout: "" + Aug 24 12:47:23.094: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7751 exec execpodc77gx -- /bin/sh -x -c nc -v -z -w 2 10.233.25.114 80' + Aug 24 12:47:23.331: INFO: stderr: "+ nc -v -z -w 2 10.233.25.114 80\nConnection to 10.233.25.114 80 port [tcp/http] succeeded!\n" + Aug 24 12:47:23.332: INFO: stdout: "" + Aug 24 12:47:23.332: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7751 exec execpodc77gx -- /bin/sh -x -c nc -v -z -w 2 192.168.121.130 31564' + Aug 24 12:47:23.631: INFO: stderr: "+ nc -v -z -w 2 192.168.121.130 31564\nConnection to 192.168.121.130 31564 port [tcp/*] succeeded!\n" + Aug 24 12:47:23.631: INFO: stdout: "" + Aug 24 12:47:23.631: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-7751 exec execpodc77gx -- /bin/sh -x -c nc -v -z -w 2 192.168.121.111 31564' + Aug 24 12:47:23.923: INFO: stderr: "+ nc -v -z -w 2 192.168.121.111 31564\nConnection to 192.168.121.111 31564 port [tcp/*] succeeded!\n" + Aug 24 12:47:23.923: INFO: stdout: "" + Aug 24 12:47:23.923: INFO: Cleaning up the ExternalName to NodePort test service + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:40.324: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 12:47:23.987: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-828" for this suite. 07/29/23 16:39:40.334 + STEP: Destroying namespace "services-7751" for this suite. 
08/24/23 12:47:24.002 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSS +SSSSSSSSS ------------------------------ -[sig-node] KubeletManagedEtcHosts - should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/kubelet_etc_hosts.go:63 -[BeforeEach] [sig-node] KubeletManagedEtcHosts +[sig-auth] ServiceAccounts + should guarantee kube-root-ca.crt exist in any namespace [Conformance] + test/e2e/auth/service_accounts.go:742 +[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:39:40.347 -Jul 29 16:39:40.348: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts 07/29/23 16:39:40.351 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:40.393 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:40.411 -[BeforeEach] [sig-node] KubeletManagedEtcHosts +STEP: Creating a kubernetes client 08/24/23 12:47:24.033 +Aug 24 12:47:24.033: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename svcaccounts 08/24/23 12:47:24.036 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:24.074 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:24.08 +[BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 -[It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/kubelet_etc_hosts.go:63 -STEP: Setting up the test 07/29/23 16:39:40.426 -STEP: Creating hostNetwork=false pod 07/29/23 16:39:40.426 -Jul 29 16:39:40.458: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "e2e-kubelet-etc-hosts-4917" to be "running and ready" -Jul 29 16:39:40.472: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 13.393436ms -Jul 29 16:39:40.472: INFO: The phase of Pod test-pod is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:39:42.483: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.024231048s -Jul 29 16:39:42.483: INFO: The phase of Pod test-pod is Running (Ready = true) -Jul 29 16:39:42.483: INFO: Pod "test-pod" satisfied condition "running and ready" -STEP: Creating hostNetwork=true pod 07/29/23 16:39:42.49 -Jul 29 16:39:42.501: INFO: Waiting up to 5m0s for pod "test-host-network-pod" in namespace "e2e-kubelet-etc-hosts-4917" to be "running and ready" -Jul 29 16:39:42.508: INFO: Pod "test-host-network-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.634609ms -Jul 29 16:39:42.508: INFO: The phase of Pod test-host-network-pod is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:39:44.536: INFO: Pod "test-host-network-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.034655215s -Jul 29 16:39:44.536: INFO: The phase of Pod test-host-network-pod is Running (Ready = true) -Jul 29 16:39:44.536: INFO: Pod "test-host-network-pod" satisfied condition "running and ready" -STEP: Running the test 07/29/23 16:39:44.557 -STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false 07/29/23 16:39:44.557 -Jul 29 16:39:44.558: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:44.558: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:44.559: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:44.559: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-1&container=busybox-1&stderr=true&stdout=true) -Jul 29 16:39:44.700: INFO: Exec stderr: "" -Jul 29 16:39:44.700: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:44.700: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:44.702: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:44.702: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-1&container=busybox-1&stderr=true&stdout=true) -Jul 29 16:39:44.883: INFO: Exec stderr: "" -Jul 29 16:39:44.883: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:44.883: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:44.885: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:44.886: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-2&container=busybox-2&stderr=true&stdout=true) -Jul 29 16:39:44.989: INFO: Exec stderr: "" -Jul 29 16:39:44.990: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:44.990: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:44.992: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:44.992: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-2&container=busybox-2&stderr=true&stdout=true) -Jul 29 16:39:45.071: INFO: Exec stderr: "" -STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount 07/29/23 16:39:45.071 -Jul 29 16:39:45.071: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:45.071: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:45.074: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:45.074: INFO: 
ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-3&container=busybox-3&stderr=true&stdout=true) -Jul 29 16:39:45.177: INFO: Exec stderr: "" -Jul 29 16:39:45.177: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:45.177: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:45.179: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:45.179: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-3&container=busybox-3&stderr=true&stdout=true) -Jul 29 16:39:45.284: INFO: Exec stderr: "" -STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true 07/29/23 16:39:45.285 -Jul 29 16:39:45.285: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:45.285: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:45.288: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:45.288: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-1&container=busybox-1&stderr=true&stdout=true) -Jul 29 16:39:45.403: INFO: Exec stderr: "" -Jul 29 16:39:45.403: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:45.403: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:45.406: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:45.406: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-1&container=busybox-1&stderr=true&stdout=true) -Jul 29 16:39:45.496: INFO: Exec stderr: "" -Jul 29 16:39:45.497: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:45.497: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:45.499: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:45.499: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-2&container=busybox-2&stderr=true&stdout=true) -Jul 29 16:39:45.617: INFO: Exec stderr: "" -Jul 29 16:39:45.618: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:39:45.618: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:39:45.623: INFO: ExecWithOptions: Clientset creation -Jul 29 16:39:45.623: INFO: ExecWithOptions: 
execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-2&container=busybox-2&stderr=true&stdout=true) -Jul 29 16:39:45.721: INFO: Exec stderr: "" -[AfterEach] [sig-node] KubeletManagedEtcHosts +[It] should guarantee kube-root-ca.crt exist in any namespace [Conformance] + test/e2e/auth/service_accounts.go:742 +Aug 24 12:47:24.096: INFO: Got root ca configmap in namespace "svcaccounts-1361" +Aug 24 12:47:24.108: INFO: Deleted root ca configmap in namespace "svcaccounts-1361" +STEP: waiting for a new root ca configmap created 08/24/23 12:47:24.609 +Aug 24 12:47:24.617: INFO: Recreated root ca configmap in namespace "svcaccounts-1361" +Aug 24 12:47:24.625: INFO: Updated root ca configmap in namespace "svcaccounts-1361" +STEP: waiting for the root ca configmap reconciled 08/24/23 12:47:25.126 +Aug 24 12:47:25.134: INFO: Reconciled root ca configmap in namespace "svcaccounts-1361" +[AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:45.721: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts +Aug 24 12:47:25.135: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts +[DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts +[DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 -STEP: Destroying namespace "e2e-kubelet-etc-hosts-4917" for this suite. 07/29/23 16:39:45.734 +STEP: Destroying namespace "svcaccounts-1361" for this suite. 
08/24/23 12:47:25.148 ------------------------------ -• [SLOW TEST] [5.404 seconds] -[sig-node] KubeletManagedEtcHosts -test/e2e/common/node/framework.go:23 - should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/kubelet_etc_hosts.go:63 +• [1.127 seconds] +[sig-auth] ServiceAccounts +test/e2e/auth/framework.go:23 + should guarantee kube-root-ca.crt exist in any namespace [Conformance] + test/e2e/auth/service_accounts.go:742 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] KubeletManagedEtcHosts + [BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:39:40.347 - Jul 29 16:39:40.348: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts 07/29/23 16:39:40.351 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:40.393 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:40.411 - [BeforeEach] [sig-node] KubeletManagedEtcHosts + STEP: Creating a kubernetes client 08/24/23 12:47:24.033 + Aug 24 12:47:24.033: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename svcaccounts 08/24/23 12:47:24.036 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:24.074 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:24.08 + [BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 - [It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/kubelet_etc_hosts.go:63 - STEP: Setting up the test 07/29/23 16:39:40.426 - STEP: Creating hostNetwork=false pod 07/29/23 16:39:40.426 - Jul 29 16:39:40.458: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "e2e-kubelet-etc-hosts-4917" to be "running and ready" - Jul 29 16:39:40.472: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 13.393436ms - Jul 29 16:39:40.472: INFO: The phase of Pod test-pod is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:39:42.483: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.024231048s - Jul 29 16:39:42.483: INFO: The phase of Pod test-pod is Running (Ready = true) - Jul 29 16:39:42.483: INFO: Pod "test-pod" satisfied condition "running and ready" - STEP: Creating hostNetwork=true pod 07/29/23 16:39:42.49 - Jul 29 16:39:42.501: INFO: Waiting up to 5m0s for pod "test-host-network-pod" in namespace "e2e-kubelet-etc-hosts-4917" to be "running and ready" - Jul 29 16:39:42.508: INFO: Pod "test-host-network-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.634609ms - Jul 29 16:39:42.508: INFO: The phase of Pod test-host-network-pod is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:39:44.536: INFO: Pod "test-host-network-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.034655215s - Jul 29 16:39:44.536: INFO: The phase of Pod test-host-network-pod is Running (Ready = true) - Jul 29 16:39:44.536: INFO: Pod "test-host-network-pod" satisfied condition "running and ready" - STEP: Running the test 07/29/23 16:39:44.557 - STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false 07/29/23 16:39:44.557 - Jul 29 16:39:44.558: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:44.558: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:44.559: INFO: ExecWithOptions: Clientset creation - Jul 29 16:39:44.559: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-1&container=busybox-1&stderr=true&stdout=true) - Jul 29 16:39:44.700: INFO: Exec stderr: "" - Jul 29 16:39:44.700: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:44.700: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:44.702: INFO: ExecWithOptions: Clientset creation - Jul 29 16:39:44.702: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-1&container=busybox-1&stderr=true&stdout=true) - Jul 29 16:39:44.883: INFO: Exec stderr: "" - Jul 29 16:39:44.883: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:44.883: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:44.885: INFO: ExecWithOptions: Clientset creation - Jul 29 16:39:44.886: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-2&container=busybox-2&stderr=true&stdout=true) - Jul 29 16:39:44.989: INFO: Exec stderr: "" - Jul 29 16:39:44.990: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:44.990: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:44.992: INFO: ExecWithOptions: Clientset creation - Jul 29 16:39:44.992: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-2&container=busybox-2&stderr=true&stdout=true) - Jul 29 16:39:45.071: INFO: Exec stderr: "" - STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount 07/29/23 16:39:45.071 - Jul 29 16:39:45.071: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:45.071: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:45.074: INFO: ExecWithOptions: Clientset creation - Jul 
29 16:39:45.074: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-3&container=busybox-3&stderr=true&stdout=true) - Jul 29 16:39:45.177: INFO: Exec stderr: "" - Jul 29 16:39:45.177: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:45.177: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:45.179: INFO: ExecWithOptions: Clientset creation - Jul 29 16:39:45.179: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-3&container=busybox-3&stderr=true&stdout=true) - Jul 29 16:39:45.284: INFO: Exec stderr: "" - STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true 07/29/23 16:39:45.285 - Jul 29 16:39:45.285: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:45.285: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:45.288: INFO: ExecWithOptions: Clientset creation - Jul 29 16:39:45.288: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-1&container=busybox-1&stderr=true&stdout=true) - Jul 29 16:39:45.403: INFO: Exec stderr: "" - Jul 29 16:39:45.403: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:45.403: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:45.406: INFO: ExecWithOptions: Clientset creation - Jul 29 16:39:45.406: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-1&container=busybox-1&stderr=true&stdout=true) - Jul 29 16:39:45.496: INFO: Exec stderr: "" - Jul 29 16:39:45.497: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:45.497: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:45.499: INFO: ExecWithOptions: Clientset creation - Jul 29 16:39:45.499: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts&container=busybox-2&container=busybox-2&stderr=true&stdout=true) - Jul 29 16:39:45.617: INFO: Exec stderr: "" - Jul 29 16:39:45.618: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-4917 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:39:45.618: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:39:45.623: INFO: ExecWithOptions: Clientset creation 
- Jul 29 16:39:45.623: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/e2e-kubelet-etc-hosts-4917/pods/test-host-network-pod/exec?command=cat&command=%2Fetc%2Fhosts-original&container=busybox-2&container=busybox-2&stderr=true&stdout=true) - Jul 29 16:39:45.721: INFO: Exec stderr: "" - [AfterEach] [sig-node] KubeletManagedEtcHosts + [It] should guarantee kube-root-ca.crt exist in any namespace [Conformance] + test/e2e/auth/service_accounts.go:742 + Aug 24 12:47:24.096: INFO: Got root ca configmap in namespace "svcaccounts-1361" + Aug 24 12:47:24.108: INFO: Deleted root ca configmap in namespace "svcaccounts-1361" + STEP: waiting for a new root ca configmap created 08/24/23 12:47:24.609 + Aug 24 12:47:24.617: INFO: Recreated root ca configmap in namespace "svcaccounts-1361" + Aug 24 12:47:24.625: INFO: Updated root ca configmap in namespace "svcaccounts-1361" + STEP: waiting for the root ca configmap reconciled 08/24/23 12:47:25.126 + Aug 24 12:47:25.134: INFO: Reconciled root ca configmap in namespace "svcaccounts-1361" + [AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:45.721: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts + Aug 24 12:47:25.135: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts + [DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] KubeletManagedEtcHosts + [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 - STEP: Destroying namespace "e2e-kubelet-etc-hosts-4917" for this suite. 07/29/23 16:39:45.734 + STEP: Destroying namespace "svcaccounts-1361" for this suite. 
08/24/23 12:47:25.148 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Kubelet when scheduling a read only busybox container - should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:184 -[BeforeEach] [sig-node] Kubelet +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should be able to deny pod and configmap creation [Conformance] + test/e2e/apimachinery/webhook.go:197 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:39:45.756 -Jul 29 16:39:45.756: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubelet-test 07/29/23 16:39:45.76 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:45.79 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:45.795 -[BeforeEach] [sig-node] Kubelet +STEP: Creating a kubernetes client 08/24/23 12:47:25.166 +Aug 24 12:47:25.167: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 12:47:25.168 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:25.196 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:25.202 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 -[It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:184 -Jul 29 16:39:45.824: INFO: Waiting up to 5m0s for pod "busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed" in namespace "kubelet-test-3277" to be "running and ready" -Jul 29 16:39:45.832: INFO: Pod "busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed": Phase="Pending", Reason="", readiness=false. Elapsed: 8.486187ms -Jul 29 16:39:45.832: INFO: The phase of Pod busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:39:47.840: INFO: Pod "busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.016683565s -Jul 29 16:39:47.840: INFO: The phase of Pod busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed is Running (Ready = true) -Jul 29 16:39:47.840: INFO: Pod "busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed" satisfied condition "running and ready" -[AfterEach] [sig-node] Kubelet +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 12:47:25.23 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:47:25.662 +STEP: Deploying the webhook pod 08/24/23 12:47:25.68 +STEP: Wait for the deployment to be ready 08/24/23 12:47:25.706 +Aug 24 12:47:25.720: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +STEP: Deploying the webhook service 08/24/23 12:47:27.74 +STEP: Verifying the service has paired with the endpoint 08/24/23 12:47:27.763 +Aug 24 12:47:28.764: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should be able to deny pod and configmap creation [Conformance] + test/e2e/apimachinery/webhook.go:197 +STEP: Registering the webhook via the AdmissionRegistration API 08/24/23 12:47:28.771 +STEP: create a pod that should be denied by the webhook 08/24/23 12:47:28.812 +STEP: create a pod that causes the webhook to hang 08/24/23 12:47:28.84 +STEP: create a configmap that should be denied by the webhook 08/24/23 12:47:38.856 +STEP: create a configmap that should be admitted by the webhook 08/24/23 12:47:38.884 +STEP: update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook 08/24/23 12:47:38.906 +STEP: update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook 08/24/23 12:47:38.922 +STEP: create a namespace that bypass the webhook 08/24/23 12:47:38.933 +STEP: create a configmap that violates the webhook policy but is in a whitelisted namespace 08/24/23 12:47:38.948 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:47.881: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Kubelet +Aug 24 12:47:39.017: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "kubelet-test-3277" for this suite. 07/29/23 16:39:47.918 +STEP: Destroying namespace "webhook-1648" for this suite. 08/24/23 12:47:39.169 +STEP: Destroying namespace "webhook-1648-markers" for this suite. 
08/24/23 12:47:39.184 ------------------------------ -• [2.174 seconds] -[sig-node] Kubelet -test/e2e/common/node/framework.go:23 - when scheduling a read only busybox container - test/e2e/common/node/kubelet.go:175 - should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:184 +• [SLOW TEST] [14.083 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should be able to deny pod and configmap creation [Conformance] + test/e2e/apimachinery/webhook.go:197 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Kubelet + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:39:45.756 - Jul 29 16:39:45.756: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubelet-test 07/29/23 16:39:45.76 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:45.79 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:45.795 - [BeforeEach] [sig-node] Kubelet + STEP: Creating a kubernetes client 08/24/23 12:47:25.166 + Aug 24 12:47:25.167: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 12:47:25.168 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:25.196 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:25.202 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 - [It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:184 - Jul 29 16:39:45.824: INFO: Waiting up to 5m0s for pod "busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed" in namespace "kubelet-test-3277" to be "running and ready" - Jul 29 16:39:45.832: INFO: Pod "busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed": Phase="Pending", Reason="", readiness=false. Elapsed: 8.486187ms - Jul 29 16:39:45.832: INFO: The phase of Pod busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:39:47.840: INFO: Pod "busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.016683565s - Jul 29 16:39:47.840: INFO: The phase of Pod busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed is Running (Ready = true) - Jul 29 16:39:47.840: INFO: Pod "busybox-readonly-fsf2498f0c-605e-49bf-a858-02be1ba6d2ed" satisfied condition "running and ready" - [AfterEach] [sig-node] Kubelet + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 12:47:25.23 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:47:25.662 + STEP: Deploying the webhook pod 08/24/23 12:47:25.68 + STEP: Wait for the deployment to be ready 08/24/23 12:47:25.706 + Aug 24 12:47:25.720: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created + STEP: Deploying the webhook service 08/24/23 12:47:27.74 + STEP: Verifying the service has paired with the endpoint 08/24/23 12:47:27.763 + Aug 24 12:47:28.764: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should be able to deny pod and configmap creation [Conformance] + test/e2e/apimachinery/webhook.go:197 + STEP: Registering the webhook via the AdmissionRegistration API 08/24/23 12:47:28.771 + STEP: create a pod that should be denied by the webhook 08/24/23 12:47:28.812 + STEP: create a pod that causes the webhook to hang 08/24/23 12:47:28.84 + STEP: create a configmap that should be denied by the webhook 08/24/23 12:47:38.856 + STEP: create a configmap that should be admitted by the webhook 08/24/23 12:47:38.884 + STEP: update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook 08/24/23 12:47:38.906 + STEP: update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook 08/24/23 12:47:38.922 + STEP: create a namespace that bypass the webhook 08/24/23 12:47:38.933 + STEP: create a configmap that violates the webhook policy but is in a whitelisted namespace 08/24/23 12:47:38.948 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:47.881: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Kubelet + Aug 24 12:47:39.017: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "kubelet-test-3277" for this suite. 07/29/23 16:39:47.918 + STEP: Destroying namespace "webhook-1648" for this suite. 08/24/23 12:47:39.169 + STEP: Destroying namespace "webhook-1648-markers" for this suite. 
08/24/23 12:47:39.184 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSS +SSSSSSSS ------------------------------ -[sig-storage] Secrets - should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:125 -[BeforeEach] [sig-storage] Secrets +[sig-apps] ReplicationController + should surface a failure condition on a common issue like exceeded quota [Conformance] + test/e2e/apps/rc.go:83 +[BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:39:47.938 -Jul 29 16:39:47.938: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename secrets 07/29/23 16:39:47.941 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:47.963 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:47.97 -[BeforeEach] [sig-storage] Secrets +STEP: Creating a kubernetes client 08/24/23 12:47:39.259 +Aug 24 12:47:39.259: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replication-controller 08/24/23 12:47:39.264 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:39.302 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:39.309 +[BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:125 -STEP: Creating secret with name secret-test-ab1f20f3-a967-4c64-96dc-c5c5005f0e3c 07/29/23 16:39:47.975 -STEP: Creating a pod to test consume secrets 07/29/23 16:39:47.985 -Jul 29 16:39:47.999: INFO: Waiting up to 5m0s for pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3" in namespace "secrets-5219" to be "Succeeded or Failed" -Jul 29 16:39:48.009: INFO: Pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3": Phase="Pending", Reason="", readiness=false. Elapsed: 10.261555ms -Jul 29 16:39:50.021: INFO: Pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021987781s -Jul 29 16:39:52.018: INFO: Pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018346054s -STEP: Saw pod success 07/29/23 16:39:52.018 -Jul 29 16:39:52.018: INFO: Pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3" satisfied condition "Succeeded or Failed" -Jul 29 16:39:52.025: INFO: Trying to get logs from node wetuj3nuajog-1 pod pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3 container secret-volume-test: -STEP: delete the pod 07/29/23 16:39:52.054 -Jul 29 16:39:52.090: INFO: Waiting for pod pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3 to disappear -Jul 29 16:39:52.097: INFO: Pod pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3 no longer exists -[AfterEach] [sig-storage] Secrets +[BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 +[It] should surface a failure condition on a common issue like exceeded quota [Conformance] + test/e2e/apps/rc.go:83 +Aug 24 12:47:39.317: INFO: Creating quota "condition-test" that allows only two pods to run in the current namespace +STEP: Creating rc "condition-test" that asks for more than the allowed pod quota 08/24/23 12:47:40.345 +STEP: Checking rc "condition-test" has the desired failure condition set 08/24/23 12:47:40.36 +STEP: Scaling down rc "condition-test" to satisfy pod quota 08/24/23 12:47:41.389 +Aug 24 12:47:41.424: INFO: Updating replication controller "condition-test" +STEP: Checking rc "condition-test" has no failure condition set 08/24/23 12:47:41.424 +[AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:52.097: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Secrets +Aug 24 12:47:42.440: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-5219" for this suite. 07/29/23 16:39:52.12 +STEP: Destroying namespace "replication-controller-423" for this suite. 
08/24/23 12:47:42.453 ------------------------------ -• [4.223 seconds] -[sig-storage] Secrets -test/e2e/common/storage/framework.go:23 - should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:125 +• [3.207 seconds] +[sig-apps] ReplicationController +test/e2e/apps/framework.go:23 + should surface a failure condition on a common issue like exceeded quota [Conformance] + test/e2e/apps/rc.go:83 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Secrets + [BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:39:47.938 - Jul 29 16:39:47.938: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 16:39:47.941 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:47.963 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:47.97 - [BeforeEach] [sig-storage] Secrets + STEP: Creating a kubernetes client 08/24/23 12:47:39.259 + Aug 24 12:47:39.259: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replication-controller 08/24/23 12:47:39.264 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:39.302 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:39.309 + [BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:125 - STEP: Creating secret with name secret-test-ab1f20f3-a967-4c64-96dc-c5c5005f0e3c 07/29/23 16:39:47.975 - STEP: Creating a pod to test consume secrets 07/29/23 16:39:47.985 - Jul 29 16:39:47.999: INFO: Waiting up to 5m0s for pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3" in namespace "secrets-5219" to be "Succeeded or Failed" - Jul 29 16:39:48.009: INFO: Pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3": Phase="Pending", Reason="", readiness=false. Elapsed: 10.261555ms - Jul 29 16:39:50.021: INFO: Pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021987781s - Jul 29 16:39:52.018: INFO: Pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018346054s - STEP: Saw pod success 07/29/23 16:39:52.018 - Jul 29 16:39:52.018: INFO: Pod "pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3" satisfied condition "Succeeded or Failed" - Jul 29 16:39:52.025: INFO: Trying to get logs from node wetuj3nuajog-1 pod pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3 container secret-volume-test: - STEP: delete the pod 07/29/23 16:39:52.054 - Jul 29 16:39:52.090: INFO: Waiting for pod pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3 to disappear - Jul 29 16:39:52.097: INFO: Pod pod-secrets-8985dbd5-d7fc-49f9-8d8c-f847afee52c3 no longer exists - [AfterEach] [sig-storage] Secrets + [BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 + [It] should surface a failure condition on a common issue like exceeded quota [Conformance] + test/e2e/apps/rc.go:83 + Aug 24 12:47:39.317: INFO: Creating quota "condition-test" that allows only two pods to run in the current namespace + STEP: Creating rc "condition-test" that asks for more than the allowed pod quota 08/24/23 12:47:40.345 + STEP: Checking rc "condition-test" has the desired failure condition set 08/24/23 12:47:40.36 + STEP: Scaling down rc "condition-test" to satisfy pod quota 08/24/23 12:47:41.389 + Aug 24 12:47:41.424: INFO: Updating replication controller "condition-test" + STEP: Checking rc "condition-test" has no failure condition set 08/24/23 12:47:41.424 + [AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:52.097: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Secrets + Aug 24 12:47:42.440: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-5219" for this suite. 07/29/23 16:39:52.12 + STEP: Destroying namespace "replication-controller-423" for this suite. 
08/24/23 12:47:42.453 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-node] Downward API - should provide host IP as an env var [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:90 -[BeforeEach] [sig-node] Downward API +[sig-network] DNS + should provide DNS for the cluster [Conformance] + test/e2e/network/dns.go:50 +[BeforeEach] [sig-network] DNS set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:39:52.167 -Jul 29 16:39:52.167: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:39:52.17 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:52.256 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:52.268 -[BeforeEach] [sig-node] Downward API +STEP: Creating a kubernetes client 08/24/23 12:47:42.469 +Aug 24 12:47:42.469: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename dns 08/24/23 12:47:42.471 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:42.499 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:42.506 +[BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 -[It] should provide host IP as an env var [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:90 -STEP: Creating a pod to test downward api env vars 07/29/23 16:39:52.275 -Jul 29 16:39:52.295: INFO: Waiting up to 5m0s for pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea" in namespace "downward-api-6499" to be "Succeeded or Failed" -Jul 29 16:39:52.302: INFO: Pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea": Phase="Pending", Reason="", readiness=false. Elapsed: 6.869958ms -Jul 29 16:39:54.309: INFO: Pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014544182s -Jul 29 16:39:56.314: INFO: Pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.019631677s -STEP: Saw pod success 07/29/23 16:39:56.316 -Jul 29 16:39:56.317: INFO: Pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea" satisfied condition "Succeeded or Failed" -Jul 29 16:39:56.326: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea container dapi-container: -STEP: delete the pod 07/29/23 16:39:56.348 -Jul 29 16:39:56.376: INFO: Waiting for pod downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea to disappear -Jul 29 16:39:56.382: INFO: Pod downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea no longer exists -[AfterEach] [sig-node] Downward API +[It] should provide DNS for the cluster [Conformance] + test/e2e/network/dns.go:50 +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;sleep 1; done + 08/24/23 12:47:42.511 +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;sleep 1; done + 08/24/23 12:47:42.512 +STEP: creating a pod to probe DNS 08/24/23 12:47:42.512 +STEP: submitting the pod to kubernetes 08/24/23 12:47:42.513 +Aug 24 12:47:42.533: INFO: Waiting up to 15m0s for pod "dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5" in namespace "dns-8588" to be "running" +Aug 24 12:47:42.546: INFO: Pod "dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5": Phase="Pending", Reason="", readiness=false. Elapsed: 13.260961ms +Aug 24 12:47:44.560: INFO: Pod "dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5": Phase="Running", Reason="", readiness=true. Elapsed: 2.027131175s +Aug 24 12:47:44.560: INFO: Pod "dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5" satisfied condition "running" +STEP: retrieving the pod 08/24/23 12:47:44.56 +STEP: looking for the results for each expected name from probers 08/24/23 12:47:44.573 +Aug 24 12:47:44.630: INFO: DNS probes using dns-8588/dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5 succeeded + +STEP: deleting the pod 08/24/23 12:47:44.63 +[AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:56.382: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Downward API +Aug 24 12:47:44.656: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-6499" for this suite. 07/29/23 16:39:56.389 +STEP: Destroying namespace "dns-8588" for this suite. 
08/24/23 12:47:44.681 ------------------------------ -• [4.235 seconds] -[sig-node] Downward API -test/e2e/common/node/framework.go:23 - should provide host IP as an env var [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:90 +• [2.262 seconds] +[sig-network] DNS +test/e2e/network/common/framework.go:23 + should provide DNS for the cluster [Conformance] + test/e2e/network/dns.go:50 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Downward API + [BeforeEach] [sig-network] DNS set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:39:52.167 - Jul 29 16:39:52.167: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:39:52.17 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:52.256 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:52.268 - [BeforeEach] [sig-node] Downward API + STEP: Creating a kubernetes client 08/24/23 12:47:42.469 + Aug 24 12:47:42.469: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename dns 08/24/23 12:47:42.471 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:42.499 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:42.506 + [BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 - [It] should provide host IP as an env var [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:90 - STEP: Creating a pod to test downward api env vars 07/29/23 16:39:52.275 - Jul 29 16:39:52.295: INFO: Waiting up to 5m0s for pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea" in namespace "downward-api-6499" to be "Succeeded or Failed" - Jul 29 16:39:52.302: INFO: Pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea": Phase="Pending", Reason="", readiness=false. Elapsed: 6.869958ms - Jul 29 16:39:54.309: INFO: Pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014544182s - Jul 29 16:39:56.314: INFO: Pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.019631677s - STEP: Saw pod success 07/29/23 16:39:56.316 - Jul 29 16:39:56.317: INFO: Pod "downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea" satisfied condition "Succeeded or Failed" - Jul 29 16:39:56.326: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea container dapi-container: - STEP: delete the pod 07/29/23 16:39:56.348 - Jul 29 16:39:56.376: INFO: Waiting for pod downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea to disappear - Jul 29 16:39:56.382: INFO: Pod downward-api-a405b99c-ec4e-4023-8851-63dd9a8474ea no longer exists - [AfterEach] [sig-node] Downward API + [It] should provide DNS for the cluster [Conformance] + test/e2e/network/dns.go:50 + STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;sleep 1; done + 08/24/23 12:47:42.511 + STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;sleep 1; done + 08/24/23 12:47:42.512 + STEP: creating a pod to probe DNS 08/24/23 12:47:42.512 + STEP: submitting the pod to kubernetes 08/24/23 12:47:42.513 + Aug 24 12:47:42.533: INFO: Waiting up to 15m0s for pod "dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5" in namespace "dns-8588" to be "running" + Aug 24 12:47:42.546: INFO: Pod "dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5": Phase="Pending", Reason="", readiness=false. Elapsed: 13.260961ms + Aug 24 12:47:44.560: INFO: Pod "dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5": Phase="Running", Reason="", readiness=true. Elapsed: 2.027131175s + Aug 24 12:47:44.560: INFO: Pod "dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5" satisfied condition "running" + STEP: retrieving the pod 08/24/23 12:47:44.56 + STEP: looking for the results for each expected name from probers 08/24/23 12:47:44.573 + Aug 24 12:47:44.630: INFO: DNS probes using dns-8588/dns-test-a21d899d-5012-4c09-8d1c-4b19f9c1d5c5 succeeded + + STEP: deleting the pod 08/24/23 12:47:44.63 + [AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:56.382: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Downward API + Aug 24 12:47:44.656: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-6499" for this suite. 07/29/23 16:39:56.389 + STEP: Destroying namespace "dns-8588" for this suite. 
08/24/23 12:47:44.681 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] Deployment - RecreateDeployment should delete old pods and create new ones [Conformance] - test/e2e/apps/deployment.go:113 -[BeforeEach] [sig-apps] Deployment +[sig-storage] Projected downwardAPI + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:68 +[BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:39:56.407 -Jul 29 16:39:56.408: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename deployment 07/29/23 16:39:56.412 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:56.441 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:56.446 -[BeforeEach] [sig-apps] Deployment +STEP: Creating a kubernetes client 08/24/23 12:47:44.742 +Aug 24 12:47:44.742: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:47:44.747 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:44.79 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:44.798 +[BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 -[It] RecreateDeployment should delete old pods and create new ones [Conformance] - test/e2e/apps/deployment.go:113 -Jul 29 16:39:56.452: INFO: Creating deployment "test-recreate-deployment" -Jul 29 16:39:56.462: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1 -Jul 29 16:39:56.480: INFO: deployment "test-recreate-deployment" doesn't have the required revision set -Jul 29 16:39:58.502: INFO: Waiting deployment "test-recreate-deployment" to complete -Jul 29 16:39:58.510: INFO: Triggering a new rollout for deployment "test-recreate-deployment" -Jul 29 16:39:58.548: INFO: Updating deployment test-recreate-deployment -Jul 29 16:39:58.548: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with olds pods -[AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 -Jul 29 16:39:58.875: INFO: Deployment "test-recreate-deployment": -&Deployment{ObjectMeta:{test-recreate-deployment deployment-9767 52cd1656-4214-4777-843b-fbd740f5e60e 28101 2 2023-07-29 16:39:56 +0000 UTC map[name:sample-pod-3] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004534798 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2023-07-29 16:39:58 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "test-recreate-deployment-cff6dc657" is progressing.,LastUpdateTime:2023-07-29 16:39:58 +0000 UTC,LastTransitionTime:2023-07-29 16:39:56 +0000 UTC,},},ReadyReplicas:0,CollisionCount:nil,},} - -Jul 29 16:39:58.887: INFO: New ReplicaSet "test-recreate-deployment-cff6dc657" of Deployment "test-recreate-deployment": -&ReplicaSet{ObjectMeta:{test-recreate-deployment-cff6dc657 deployment-9767 6d36db70-064e-47c7-8527-0d5a08ad7d25 28097 1 2023-07-29 16:39:58 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-recreate-deployment 52cd1656-4214-4777-843b-fbd740f5e60e 0xc004534c60 0xc004534c61}] [] [{kube-controller-manager Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"52cd1656-4214-4777-843b-fbd740f5e60e\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: cff6dc657,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004534cf8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Jul 29 16:39:58.887: INFO: All old ReplicaSets of Deployment "test-recreate-deployment": -Jul 29 16:39:58.887: INFO: &ReplicaSet{ObjectMeta:{test-recreate-deployment-795566c5cb deployment-9767 6489aab3-0a44-46ca-80bb-d8f7dc9a5649 28088 2 2023-07-29 16:39:56 +0000 UTC map[name:sample-pod-3 pod-template-hash:795566c5cb] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-recreate-deployment 52cd1656-4214-4777-843b-fbd740f5e60e 0xc004534b47 0xc004534b48}] [] [{kube-controller-manager Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"52cd1656-4214-4777-843b-fbd740f5e60e\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 795566c5cb,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:795566c5cb] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004534bf8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Jul 29 16:39:58.896: INFO: Pod "test-recreate-deployment-cff6dc657-qbpln" is not available: -&Pod{ObjectMeta:{test-recreate-deployment-cff6dc657-qbpln test-recreate-deployment-cff6dc657- deployment-9767 8afd9871-3e1a-4df2-bef0-facbb2a909bb 28100 0 2023-07-29 16:39:58 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[] [{apps/v1 ReplicaSet test-recreate-deployment-cff6dc657 6d36db70-064e-47c7-8527-0d5a08ad7d25 0xc0044d0aa0 0xc0044d0aa1}] [] [{kube-controller-manager Update v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"6d36db70-064e-47c7-8527-0d5a08ad7d25\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-b77kn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b77kn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Oper
ator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 16:39:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +[BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 +[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:68 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:47:44.809 +Aug 24 12:47:44.829: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3" in namespace "projected-5205" to be "Succeeded or Failed" +Aug 24 12:47:44.865: INFO: Pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3": Phase="Pending", Reason="", readiness=false. Elapsed: 34.912237ms +Aug 24 12:47:46.872: INFO: Pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.042632301s +Aug 24 12:47:48.873: INFO: Pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3": Phase="Succeeded", Reason="", readiness=false. 
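
For reference, the Deployment, ReplicaSet, and Pod dumps above come from the RecreateDeployment conformance case: with strategy type Recreate the controller scales the old ReplicaSet ("795566c5cb", agnhost) to zero before the new one ("cff6dc657", httpd) starts, which is why the new pod is still Pending/ContainerCreating and the Deployment reports UnavailableReplicas:1 at teardown. A minimal manifest of the same shape, reconstructed from the dumps as an illustrative sketch rather than the test's exact fixture:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-recreate-deployment        # name as logged above
  labels:
    name: sample-pod-3
spec:
  replicas: 1
  strategy:
    type: Recreate                      # delete all old pods before creating new ones
  selector:
    matchLabels:
      name: sample-pod-3
  template:
    metadata:
      labels:
        name: sample-pod-3
    spec:
      containers:
      - name: httpd
        image: registry.k8s.io/e2e-test-images/httpd:2.4.38-4
        imagePullPolicy: IfNotPresent
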
Elapsed: 4.043669152s +STEP: Saw pod success 08/24/23 12:47:48.874 +Aug 24 12:47:48.874: INFO: Pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3" satisfied condition "Succeeded or Failed" +Aug 24 12:47:48.881: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3 container client-container: +STEP: delete the pod 08/24/23 12:47:48.893 +Aug 24 12:47:48.919: INFO: Waiting for pod downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3 to disappear +Aug 24 12:47:48.929: INFO: Pod downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 -Jul 29 16:39:58.896: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Deployment +Aug 24 12:47:48.929: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 -STEP: Destroying namespace "deployment-9767" for this suite. 07/29/23 16:39:58.905 +STEP: Destroying namespace "projected-5205" for this suite. 08/24/23 12:47:48.941 ------------------------------ -• [2.515 seconds] -[sig-apps] Deployment -test/e2e/apps/framework.go:23 - RecreateDeployment should delete old pods and create new ones [Conformance] - test/e2e/apps/deployment.go:113 +• [4.213 seconds] +[sig-storage] Projected downwardAPI +test/e2e/common/storage/framework.go:23 + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:68 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Deployment + [BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:39:56.407 - Jul 29 16:39:56.408: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename deployment 07/29/23 16:39:56.412 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:56.441 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:56.446 - [BeforeEach] [sig-apps] Deployment + STEP: Creating a kubernetes client 08/24/23 12:47:44.742 + Aug 24 12:47:44.742: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:47:44.747 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:44.79 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:44.798 + [BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 - [It] RecreateDeployment should delete old pods and create new ones [Conformance] - test/e2e/apps/deployment.go:113 - Jul 29 16:39:56.452: INFO: Creating deployment "test-recreate-deployment" - Jul 29 16:39:56.462: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1 - Jul 29 16:39:56.480: INFO: deployment "test-recreate-deployment" doesn't have the required revision set - Jul 29 16:39:58.502: INFO: Waiting deployment 
"test-recreate-deployment" to complete - Jul 29 16:39:58.510: INFO: Triggering a new rollout for deployment "test-recreate-deployment" - Jul 29 16:39:58.548: INFO: Updating deployment test-recreate-deployment - Jul 29 16:39:58.548: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with olds pods - [AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 - Jul 29 16:39:58.875: INFO: Deployment "test-recreate-deployment": - &Deployment{ObjectMeta:{test-recreate-deployment deployment-9767 52cd1656-4214-4777-843b-fbd740f5e60e 28101 2 2023-07-29 16:39:56 +0000 UTC map[name:sample-pod-3] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004534798 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2023-07-29 16:39:58 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "test-recreate-deployment-cff6dc657" is 
progressing.,LastUpdateTime:2023-07-29 16:39:58 +0000 UTC,LastTransitionTime:2023-07-29 16:39:56 +0000 UTC,},},ReadyReplicas:0,CollisionCount:nil,},} - - Jul 29 16:39:58.887: INFO: New ReplicaSet "test-recreate-deployment-cff6dc657" of Deployment "test-recreate-deployment": - &ReplicaSet{ObjectMeta:{test-recreate-deployment-cff6dc657 deployment-9767 6d36db70-064e-47c7-8527-0d5a08ad7d25 28097 1 2023-07-29 16:39:58 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-recreate-deployment 52cd1656-4214-4777-843b-fbd740f5e60e 0xc004534c60 0xc004534c61}] [] [{kube-controller-manager Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"52cd1656-4214-4777-843b-fbd740f5e60e\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: cff6dc657,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004534cf8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - Jul 29 16:39:58.887: INFO: All old ReplicaSets of Deployment "test-recreate-deployment": - Jul 29 16:39:58.887: INFO: &ReplicaSet{ObjectMeta:{test-recreate-deployment-795566c5cb deployment-9767 6489aab3-0a44-46ca-80bb-d8f7dc9a5649 28088 2 2023-07-29 16:39:56 +0000 UTC map[name:sample-pod-3 pod-template-hash:795566c5cb] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-recreate-deployment 52cd1656-4214-4777-843b-fbd740f5e60e 0xc004534b47 0xc004534b48}] [] [{kube-controller-manager Update apps/v1 
2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"52cd1656-4214-4777-843b-fbd740f5e60e\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 795566c5cb,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:795566c5cb] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004534bf8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - Jul 29 16:39:58.896: INFO: Pod "test-recreate-deployment-cff6dc657-qbpln" is not available: - &Pod{ObjectMeta:{test-recreate-deployment-cff6dc657-qbpln test-recreate-deployment-cff6dc657- deployment-9767 8afd9871-3e1a-4df2-bef0-facbb2a909bb 28100 0 2023-07-29 16:39:58 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[] [{apps/v1 ReplicaSet test-recreate-deployment-cff6dc657 6d36db70-064e-47c7-8527-0d5a08ad7d25 0xc0044d0aa0 0xc0044d0aa1}] [] [{kube-controller-manager Update v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"6d36db70-064e-47c7-8527-0d5a08ad7d25\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 16:39:58 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-b77kn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b77kn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Oper
ator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 16:39:58 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 16:39:58 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - [AfterEach] [sig-apps] Deployment + [BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 + [It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:68 + STEP: Creating a pod to test downward API volume plugin 08/24/23 12:47:44.809 + Aug 24 12:47:44.829: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3" in namespace "projected-5205" to be "Succeeded or Failed" + Aug 24 12:47:44.865: INFO: Pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3": Phase="Pending", Reason="", readiness=false. Elapsed: 34.912237ms + Aug 24 12:47:46.872: INFO: Pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.042632301s + Aug 24 12:47:48.873: INFO: Pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3": Phase="Succeeded", Reason="", readiness=false. 
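
The projected downwardAPI case logged around this point ("should set DefaultMode on files") creates a pod whose projected downward API volume carries an explicit defaultMode and checks that the container sees the file with that mode; the DefaultMode:*420 on the kube-api-access volumes in the pod dumps above is the same field, 420 decimal being 0644 octal. A minimal sketch of such a pod follows; the mode value, image, and command are illustrative assumptions (the e2e test uses its own helper image), while the container name client-container comes from the log:

apiVersion: v1
kind: Pod
metadata:
  name: downwardapi-volume-demo          # hypothetical name; the test appends a UUID suffix
spec:
  restartPolicy: Never
  containers:
  - name: client-container               # container name as logged above
    image: busybox:1.36                  # placeholder image for this sketch
    command: ["sh", "-c", "stat -c '%a' /etc/podinfo/podname"]  # print the file's mode
    volumeMounts:
    - name: podinfo
      mountPath: /etc/podinfo
  volumes:
  - name: podinfo
    projected:
      defaultMode: 0400                  # the property under test; 0400 is illustrative
      sources:
      - downwardAPI:
          items:
          - path: podname
            fieldRef:
              fieldPath: metadata.name
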
Elapsed: 4.043669152s + STEP: Saw pod success 08/24/23 12:47:48.874 + Aug 24 12:47:48.874: INFO: Pod "downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3" satisfied condition "Succeeded or Failed" + Aug 24 12:47:48.881: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3 container client-container: + STEP: delete the pod 08/24/23 12:47:48.893 + Aug 24 12:47:48.919: INFO: Waiting for pod downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3 to disappear + Aug 24 12:47:48.929: INFO: Pod downwardapi-volume-d3186109-7cd4-4c0c-8d58-94207edd72d3 no longer exists + [AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 - Jul 29 16:39:58.896: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Deployment + Aug 24 12:47:48.929: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 - STEP: Destroying namespace "deployment-9767" for this suite. 07/29/23 16:39:58.905 + STEP: Destroying namespace "projected-5205" for this suite. 08/24/23 12:47:48.941 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSS ------------------------------ -[sig-apps] Daemon set [Serial] - should list and delete a collection of DaemonSets [Conformance] - test/e2e/apps/daemon_set.go:834 -[BeforeEach] [sig-apps] Daemon set [Serial] +[sig-storage] Projected secret + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:119 +[BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:39:58.937 -Jul 29 16:39:58.937: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename daemonsets 07/29/23 16:39:58.94 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:58.977 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:58.987 -[BeforeEach] [sig-apps] Daemon set [Serial] +STEP: Creating a kubernetes client 08/24/23 12:47:48.963 +Aug 24 12:47:48.963: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:47:48.965 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:48.999 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:49.004 +[BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 -[It] should list and delete a collection of DaemonSets [Conformance] - test/e2e/apps/daemon_set.go:834 -STEP: Creating simple DaemonSet "daemon-set" 07/29/23 16:39:59.036 -STEP: Check that daemon pods launch on every node of the cluster. 
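
The DaemonSet case being torn down here ("should list and delete a collection of DaemonSets") creates one simple DaemonSet, waits until every node runs exactly one of its pods (the per-node counters in the log), lists all DaemonSets, and then removes them with a collection delete; the kubectl equivalent of that last API call is kubectl delete daemonsets --all -n <namespace>. A manifest matching the pod dumps that follow, as a sketch with the selector and labels reconstructed from those dumps:

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemon-set
spec:
  selector:
    matchLabels:
      daemonset-name: daemon-set
  template:
    metadata:
      labels:
        daemonset-name: daemon-set
    spec:
      containers:
      - name: app                        # container name as in the pod dumps
        image: registry.k8s.io/e2e-test-images/httpd:2.4.38-4
        ports:
        - containerPort: 9376
          protocol: TCP
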
07/29/23 16:39:59.051 -Jul 29 16:39:59.069: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:39:59.069: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:40:00.088: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:40:00.088: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:40:01.097: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 16:40:01.098: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1 -Jul 29 16:40:02.095: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 -Jul 29 16:40:02.095: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set -STEP: listing all DeamonSets 07/29/23 16:40:02.101 -STEP: DeleteCollection of the DaemonSets 07/29/23 16:40:02.11 -STEP: Verify that ReplicaSets have been deleted 07/29/23 16:40:02.123 -[AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 -Jul 29 16:40:02.154: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"28165"},"items":null} - -Jul 29 16:40:02.163: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"28166"},"items":[{"metadata":{"name":"daemon-set-2vtj9","generateName":"daemon-set-","namespace":"daemonsets-2136","uid":"90559f23-88a2-4a7c-8b28-8ae6ef3ed73f","resourceVersion":"28163","creationTimestamp":"2023-07-29T16:39:59Z","deletionTimestamp":"2023-07-29T16:40:32Z","deletionGracePeriodSeconds":30,"labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"a7cb7221-172e-4896-9b42-693cef7c0728","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:39:59Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"a7cb7221-172e-4896-9b42-693cef7c0728\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:40:01Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.187\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{
"name":"kube-api-access-rdv59","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-rdv59","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"wetuj3nuajog-3","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["wetuj3nuajog-3"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:01Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:01Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"}],"hostIP":"192.168.121.141","podIP":"10.233.66.187","podIPs":[{"ip":"10.233.66.187"}],"startTime":"2023-07-29T16:39:59Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-07-29T16:40:00Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://8e3c8bb8bcace1e480b46f764716bfa217a38211175fdc74b430896a610b91c4","started":true}],"qosClass":"BestEffort"}},{"metadata":{"name":"daemon-set-j76qz","generateName":"daemon-set-","namespace":"daemonsets-2136","uid":"b7bebd63-aca7-46d1-8326-047a1b53a036","resourceVersion":"28166","creationTimestamp":"2023-07-29T16:39:59Z","deletionTimestamp":"2023-07-29T16:40:32Z","deletionGracePeriodSeconds":30,"labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"a7cb7221-172e-4896-9b42-693cef7c0728","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:39:59Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generat
eName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"a7cb7221-172e-4896-9b42-693cef7c0728\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:40:00Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.145\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-h8qfb","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-h8qfb","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"wetuj3nuajog-2","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["wetuj3nuajog-2"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:00Z"},{"type":"ContainersReady","status":"True","lastP
robeTime":null,"lastTransitionTime":"2023-07-29T16:40:00Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"}],"hostIP":"192.168.121.211","podIP":"10.233.65.145","podIPs":[{"ip":"10.233.65.145"}],"startTime":"2023-07-29T16:39:59Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-07-29T16:40:00Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://2e1d775d060905d8c660dac43af4a16fce5b27d078b8b75c8e377b7f1563e02d","started":true}],"qosClass":"BestEffort"}},{"metadata":{"name":"daemon-set-v28jz","generateName":"daemon-set-","namespace":"daemonsets-2136","uid":"133b1dc9-5edd-4696-9bb6-e31968c47eef","resourceVersion":"28165","creationTimestamp":"2023-07-29T16:39:59Z","deletionTimestamp":"2023-07-29T16:40:32Z","deletionGracePeriodSeconds":30,"labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"a7cb7221-172e-4896-9b42-693cef7c0728","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:39:59Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"a7cb7221-172e-4896-9b42-693cef7c0728\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:40:00Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.14\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-ggt8s","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-ggt8s","readOnly":true,"mountPath":"/var/run/sec
rets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"wetuj3nuajog-1","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["wetuj3nuajog-1"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:00Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:00Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"}],"hostIP":"192.168.121.120","podIP":"10.233.64.14","podIPs":[{"ip":"10.233.64.14"}],"startTime":"2023-07-29T16:39:59Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-07-29T16:40:00Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://271c7de04194e83873991ca53a3b63c0e6c34842611d4a78ba3122427896f9de","started":true}],"qosClass":"BestEffort"}}]} - -[AfterEach] [sig-apps] Daemon set [Serial] +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:119 +STEP: Creating secret with name projected-secret-test-90ab91b3-b5dd-4bf8-9342-b4255699fbd5 08/24/23 12:47:49.011 +STEP: Creating a pod to test consume secrets 08/24/23 12:47:49.021 +Aug 24 12:47:49.038: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5" in namespace "projected-6838" to be "Succeeded or Failed" +Aug 24 12:47:49.044: INFO: Pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5": Phase="Pending", Reason="", readiness=false. Elapsed: 5.425482ms +Aug 24 12:47:51.054: INFO: Pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015873628s +Aug 24 12:47:53.053: INFO: Pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5": Phase="Succeeded", Reason="", readiness=false. 
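
The projected secret case logged above ("should be consumable in multiple volumes in a pod") mounts one secret into the same pod through two separate projected volumes and verifies it is readable at both paths. A sketch under stated assumptions: the secret payload, mount paths, image, and command below are illustrative, and only the secret name prefix and the container name secret-volume-test come from the log:

apiVersion: v1
kind: Secret
metadata:
  name: projected-secret-test            # the logged name carries a generated UUID suffix
stringData:
  data-1: value-1                        # illustrative payload; not shown in this log
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-projected-secrets-demo       # hypothetical name
spec:
  restartPolicy: Never
  containers:
  - name: secret-volume-test             # container name as logged above
    image: busybox:1.36                  # placeholder image for this sketch
    command: ["sh", "-c", "cat /etc/secret-volume-1/data-1 /etc/secret-volume-2/data-1"]
    volumeMounts:
    - name: secret-volume-1
      mountPath: /etc/secret-volume-1
      readOnly: true
    - name: secret-volume-2
      mountPath: /etc/secret-volume-2
      readOnly: true
  volumes:
  - name: secret-volume-1
    projected:
      sources:
      - secret:
          name: projected-secret-test
  - name: secret-volume-2
    projected:
      sources:
      - secret:
          name: projected-secret-test
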
Elapsed: 4.014370119s +STEP: Saw pod success 08/24/23 12:47:53.053 +Aug 24 12:47:53.054: INFO: Pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5" satisfied condition "Succeeded or Failed" +Aug 24 12:47:53.061: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5 container secret-volume-test: +STEP: delete the pod 08/24/23 12:47:53.076 +Aug 24 12:47:53.094: INFO: Waiting for pod pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5 to disappear +Aug 24 12:47:53.100: INFO: Pod pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5 no longer exists +[AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 -Jul 29 16:40:02.206: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] +Aug 24 12:47:53.100: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] +[DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] +[DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 -STEP: Destroying namespace "daemonsets-2136" for this suite. 07/29/23 16:40:02.227 +STEP: Destroying namespace "projected-6838" for this suite. 08/24/23 12:47:53.11 ------------------------------ -• [3.302 seconds] -[sig-apps] Daemon set [Serial] -test/e2e/apps/framework.go:23 - should list and delete a collection of DaemonSets [Conformance] - test/e2e/apps/daemon_set.go:834 +• [4.160 seconds] +[sig-storage] Projected secret +test/e2e/common/storage/framework.go:23 + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:119 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Daemon set [Serial] + [BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:39:58.937 - Jul 29 16:39:58.937: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename daemonsets 07/29/23 16:39:58.94 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:39:58.977 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:39:58.987 - [BeforeEach] [sig-apps] Daemon set [Serial] + STEP: Creating a kubernetes client 08/24/23 12:47:48.963 + Aug 24 12:47:48.963: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:47:48.965 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:48.999 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:49.004 + [BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 - [It] should list and delete a collection of DaemonSets [Conformance] - test/e2e/apps/daemon_set.go:834 - STEP: Creating simple DaemonSet "daemon-set" 07/29/23 16:39:59.036 - STEP: Check that daemon pods launch on every node of the cluster. 
07/29/23 16:39:59.051 - Jul 29 16:39:59.069: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:39:59.069: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:40:00.088: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:40:00.088: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:40:01.097: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 16:40:01.098: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1 - Jul 29 16:40:02.095: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 - Jul 29 16:40:02.095: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set - STEP: listing all DeamonSets 07/29/23 16:40:02.101 - STEP: DeleteCollection of the DaemonSets 07/29/23 16:40:02.11 - STEP: Verify that ReplicaSets have been deleted 07/29/23 16:40:02.123 - [AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 - Jul 29 16:40:02.154: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"28165"},"items":null} - - Jul 29 16:40:02.163: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"28166"},"items":[{"metadata":{"name":"daemon-set-2vtj9","generateName":"daemon-set-","namespace":"daemonsets-2136","uid":"90559f23-88a2-4a7c-8b28-8ae6ef3ed73f","resourceVersion":"28163","creationTimestamp":"2023-07-29T16:39:59Z","deletionTimestamp":"2023-07-29T16:40:32Z","deletionGracePeriodSeconds":30,"labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"a7cb7221-172e-4896-9b42-693cef7c0728","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:39:59Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"a7cb7221-172e-4896-9b42-693cef7c0728\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:40:01Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.187\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec"
:{"volumes":[{"name":"kube-api-access-rdv59","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-rdv59","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"wetuj3nuajog-3","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["wetuj3nuajog-3"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:01Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:01Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"}],"hostIP":"192.168.121.141","podIP":"10.233.66.187","podIPs":[{"ip":"10.233.66.187"}],"startTime":"2023-07-29T16:39:59Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-07-29T16:40:00Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://8e3c8bb8bcace1e480b46f764716bfa217a38211175fdc74b430896a610b91c4","started":true}],"qosClass":"BestEffort"}},{"metadata":{"name":"daemon-set-j76qz","generateName":"daemon-set-","namespace":"daemonsets-2136","uid":"b7bebd63-aca7-46d1-8326-047a1b53a036","resourceVersion":"28166","creationTimestamp":"2023-07-29T16:39:59Z","deletionTimestamp":"2023-07-29T16:40:32Z","deletionGracePeriodSeconds":30,"labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"a7cb7221-172e-4896-9b42-693cef7c0728","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:39:59Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadat
a":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"a7cb7221-172e-4896-9b42-693cef7c0728\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:40:00Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.145\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-h8qfb","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-h8qfb","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"wetuj3nuajog-2","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["wetuj3nuajog-2"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:00Z"},{"type":"ContainersReady","status"
:"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:00Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"}],"hostIP":"192.168.121.211","podIP":"10.233.65.145","podIPs":[{"ip":"10.233.65.145"}],"startTime":"2023-07-29T16:39:59Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-07-29T16:40:00Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://2e1d775d060905d8c660dac43af4a16fce5b27d078b8b75c8e377b7f1563e02d","started":true}],"qosClass":"BestEffort"}},{"metadata":{"name":"daemon-set-v28jz","generateName":"daemon-set-","namespace":"daemonsets-2136","uid":"133b1dc9-5edd-4696-9bb6-e31968c47eef","resourceVersion":"28165","creationTimestamp":"2023-07-29T16:39:59Z","deletionTimestamp":"2023-07-29T16:40:32Z","deletionGracePeriodSeconds":30,"labels":{"controller-revision-hash":"6cff669f8c","daemonset-name":"daemon-set","pod-template-generation":"1"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"a7cb7221-172e-4896-9b42-693cef7c0728","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:39:59Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"a7cb7221-172e-4896-9b42-693cef7c0728\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2023-07-29T16:40:00Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.14\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-ggt8s","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-ggt8s","readOnly":true,"mountPath"
:"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent","securityContext":{}}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"wetuj3nuajog-1","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["wetuj3nuajog-1"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:00Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:40:00Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2023-07-29T16:39:59Z"}],"hostIP":"192.168.121.120","podIP":"10.233.64.14","podIPs":[{"ip":"10.233.64.14"}],"startTime":"2023-07-29T16:39:59Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2023-07-29T16:40:00Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"registry.k8s.io/e2e-test-images/httpd:2.4.38-4","imageID":"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22","containerID":"cri-o://271c7de04194e83873991ca53a3b63c0e6c34842611d4a78ba3122427896f9de","started":true}],"qosClass":"BestEffort"}}]} - - [AfterEach] [sig-apps] Daemon set [Serial] + [It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:119 + STEP: Creating secret with name projected-secret-test-90ab91b3-b5dd-4bf8-9342-b4255699fbd5 08/24/23 12:47:49.011 + STEP: Creating a pod to test consume secrets 08/24/23 12:47:49.021 + Aug 24 12:47:49.038: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5" in namespace "projected-6838" to be "Succeeded or Failed" + Aug 24 12:47:49.044: INFO: Pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5": Phase="Pending", Reason="", readiness=false. Elapsed: 5.425482ms + Aug 24 12:47:51.054: INFO: Pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015873628s + Aug 24 12:47:53.053: INFO: Pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014370119s + STEP: Saw pod success 08/24/23 12:47:53.053 + Aug 24 12:47:53.054: INFO: Pod "pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5" satisfied condition "Succeeded or Failed" + Aug 24 12:47:53.061: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5 container secret-volume-test: + STEP: delete the pod 08/24/23 12:47:53.076 + Aug 24 12:47:53.094: INFO: Waiting for pod pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5 to disappear + Aug 24 12:47:53.100: INFO: Pod pod-projected-secrets-84fd1a82-1ac9-4a86-9420-a8c8cfe3dcc5 no longer exists + [AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 - Jul 29 16:40:02.206: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + Aug 24 12:47:53.100: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 - STEP: Destroying namespace "daemonsets-2136" for this suite. 07/29/23 16:40:02.227 + STEP: Destroying namespace "projected-6838" for this suite. 08/24/23 12:47:53.11 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSS +SSSSSSSSSSS ------------------------------ -[sig-apps] ReplicaSet - should adopt matching pods on creation and release no longer matching pods [Conformance] - test/e2e/apps/replica_set.go:131 -[BeforeEach] [sig-apps] ReplicaSet +[sig-storage] Downward API volume + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:261 +[BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:40:02.24 -Jul 29 16:40:02.240: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replicaset 07/29/23 16:40:02.244 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:02.274 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:02.277 -[BeforeEach] [sig-apps] ReplicaSet +STEP: Creating a kubernetes client 08/24/23 12:47:53.126 +Aug 24 12:47:53.126: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:47:53.128 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:53.151 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:53.156 +[BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 -[It] should adopt matching pods on creation and release no longer matching pods [Conformance] - test/e2e/apps/replica_set.go:131 -STEP: Given a Pod with a 'name' label pod-adoption-release is created 07/29/23 16:40:02.281 -Jul 29 16:40:02.318: INFO: Waiting up to 5m0s for pod "pod-adoption-release" in namespace "replicaset-570" to be "running and ready" -Jul 29 16:40:02.324: INFO: Pod "pod-adoption-release": Phase="Pending", Reason="", readiness=false. 
Elapsed: 6.237156ms -Jul 29 16:40:02.324: INFO: The phase of Pod pod-adoption-release is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:40:04.332: INFO: Pod "pod-adoption-release": Phase="Running", Reason="", readiness=true. Elapsed: 2.01420445s -Jul 29 16:40:04.332: INFO: The phase of Pod pod-adoption-release is Running (Ready = true) -Jul 29 16:40:04.332: INFO: Pod "pod-adoption-release" satisfied condition "running and ready" -STEP: When a replicaset with a matching selector is created 07/29/23 16:40:04.34 -STEP: Then the orphan pod is adopted 07/29/23 16:40:04.35 -STEP: When the matched label of one of its pods change 07/29/23 16:40:05.369 -Jul 29 16:40:05.376: INFO: Pod name pod-adoption-release: Found 1 pods out of 1 -STEP: Then the pod is released 07/29/23 16:40:05.393 -[AfterEach] [sig-apps] ReplicaSet +[BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:261 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:47:53.161 +Aug 24 12:47:53.182: INFO: Waiting up to 5m0s for pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e" in namespace "downward-api-7376" to be "Succeeded or Failed" +Aug 24 12:47:53.200: INFO: Pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e": Phase="Pending", Reason="", readiness=false. Elapsed: 17.607ms +Aug 24 12:47:55.210: INFO: Pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027371728s +Aug 24 12:47:57.210: INFO: Pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.026975203s +STEP: Saw pod success 08/24/23 12:47:57.21 +Aug 24 12:47:57.210: INFO: Pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e" satisfied condition "Succeeded or Failed" +Aug 24 12:47:57.215: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e container client-container: +STEP: delete the pod 08/24/23 12:47:57.227 +Aug 24 12:47:57.251: INFO: Waiting for pod downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e to disappear +Aug 24 12:47:57.258: INFO: Pod downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e no longer exists +[AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 -Jul 29 16:40:06.411: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] ReplicaSet +Aug 24 12:47:57.258: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] ReplicaSet +[DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] ReplicaSet +[DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 -STEP: Destroying namespace "replicaset-570" for this suite. 07/29/23 16:40:06.419 +STEP: Destroying namespace "downward-api-7376" for this suite. 
08/24/23 12:47:57.268 ------------------------------ -• [4.190 seconds] -[sig-apps] ReplicaSet -test/e2e/apps/framework.go:23 - should adopt matching pods on creation and release no longer matching pods [Conformance] - test/e2e/apps/replica_set.go:131 +• [4.156 seconds] +[sig-storage] Downward API volume +test/e2e/common/storage/framework.go:23 + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:261 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] ReplicaSet + [BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:40:02.24 - Jul 29 16:40:02.240: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replicaset 07/29/23 16:40:02.244 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:02.274 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:02.277 - [BeforeEach] [sig-apps] ReplicaSet + STEP: Creating a kubernetes client 08/24/23 12:47:53.126 + Aug 24 12:47:53.126: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 12:47:53.128 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:53.151 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:53.156 + [BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 - [It] should adopt matching pods on creation and release no longer matching pods [Conformance] - test/e2e/apps/replica_set.go:131 - STEP: Given a Pod with a 'name' label pod-adoption-release is created 07/29/23 16:40:02.281 - Jul 29 16:40:02.318: INFO: Waiting up to 5m0s for pod "pod-adoption-release" in namespace "replicaset-570" to be "running and ready" - Jul 29 16:40:02.324: INFO: Pod "pod-adoption-release": Phase="Pending", Reason="", readiness=false. Elapsed: 6.237156ms - Jul 29 16:40:02.324: INFO: The phase of Pod pod-adoption-release is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:40:04.332: INFO: Pod "pod-adoption-release": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01420445s - Jul 29 16:40:04.332: INFO: The phase of Pod pod-adoption-release is Running (Ready = true) - Jul 29 16:40:04.332: INFO: Pod "pod-adoption-release" satisfied condition "running and ready" - STEP: When a replicaset with a matching selector is created 07/29/23 16:40:04.34 - STEP: Then the orphan pod is adopted 07/29/23 16:40:04.35 - STEP: When the matched label of one of its pods change 07/29/23 16:40:05.369 - Jul 29 16:40:05.376: INFO: Pod name pod-adoption-release: Found 1 pods out of 1 - STEP: Then the pod is released 07/29/23 16:40:05.393 - [AfterEach] [sig-apps] ReplicaSet + [BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 + [It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:261 + STEP: Creating a pod to test downward API volume plugin 08/24/23 12:47:53.161 + Aug 24 12:47:53.182: INFO: Waiting up to 5m0s for pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e" in namespace "downward-api-7376" to be "Succeeded or Failed" + Aug 24 12:47:53.200: INFO: Pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e": Phase="Pending", Reason="", readiness=false. Elapsed: 17.607ms + Aug 24 12:47:55.210: INFO: Pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027371728s + Aug 24 12:47:57.210: INFO: Pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.026975203s + STEP: Saw pod success 08/24/23 12:47:57.21 + Aug 24 12:47:57.210: INFO: Pod "downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e" satisfied condition "Succeeded or Failed" + Aug 24 12:47:57.215: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e container client-container: + STEP: delete the pod 08/24/23 12:47:57.227 + Aug 24 12:47:57.251: INFO: Waiting for pod downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e to disappear + Aug 24 12:47:57.258: INFO: Pod downwardapi-volume-21da27fa-143c-4927-af44-ede01958e98e no longer exists + [AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 - Jul 29 16:40:06.411: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] ReplicaSet + Aug 24 12:47:57.258: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] ReplicaSet + [DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] ReplicaSet + [DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 - STEP: Destroying namespace "replicaset-570" for this suite. 07/29/23 16:40:06.419 + STEP: Destroying namespace "downward-api-7376" for this suite. 
08/24/23 12:47:57.268 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:89 -[BeforeEach] [sig-storage] ConfigMap +[sig-apps] Deployment + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + test/e2e/apps/deployment.go:105 +[BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:40:06.434 -Jul 29 16:40:06.434: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:40:06.436 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:06.468 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:06.473 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 12:47:57.289 +Aug 24 12:47:57.289: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename deployment 08/24/23 12:47:57.291 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:57.317 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:57.323 +[BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:89 -STEP: Creating configMap with name configmap-test-volume-map-dab979ed-c44e-4eb6-979c-c8c5dc0eb5ad 07/29/23 16:40:06.478 -STEP: Creating a pod to test consume configMaps 07/29/23 16:40:06.491 -Jul 29 16:40:06.513: INFO: Waiting up to 5m0s for pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81" in namespace "configmap-5145" to be "Succeeded or Failed" -Jul 29 16:40:06.530: INFO: Pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81": Phase="Pending", Reason="", readiness=false. Elapsed: 17.28753ms -Jul 29 16:40:08.542: INFO: Pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81": Phase="Pending", Reason="", readiness=false. Elapsed: 2.029355405s -Jul 29 16:40:10.537: INFO: Pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02409303s -STEP: Saw pod success 07/29/23 16:40:10.537 -Jul 29 16:40:10.538: INFO: Pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81" satisfied condition "Succeeded or Failed" -Jul 29 16:40:10.543: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81 container agnhost-container: -STEP: delete the pod 07/29/23 16:40:10.555 -Jul 29 16:40:10.577: INFO: Waiting for pod pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81 to disappear -Jul 29 16:40:10.583: INFO: Pod pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81 no longer exists -[AfterEach] [sig-storage] ConfigMap +[BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 +[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] + test/e2e/apps/deployment.go:105 +Aug 24 12:47:57.329: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted) +Aug 24 12:47:57.348: INFO: Pod name sample-pod: Found 0 pods out of 1 +Aug 24 12:48:02.358: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running 08/24/23 12:48:02.359 +Aug 24 12:48:02.359: INFO: Creating deployment "test-rolling-update-deployment" +Aug 24 12:48:02.374: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has +Aug 24 12:48:02.390: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created +Aug 24 12:48:04.404: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected +Aug 24 12:48:04.411: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted) +[AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 +Aug 24 12:48:04.432: INFO: Deployment "test-rolling-update-deployment": +&Deployment{ObjectMeta:{test-rolling-update-deployment deployment-5338 50aaa261-1616-4a69-8e90-551739a91f7a 26209 1 2023-08-24 12:48:02 +0000 UTC map[name:sample-pod] map[deployment.kubernetes.io/revision:3546343826724305833] [] [] [{e2e.test Update apps/v1 2023-08-24 12:48:02 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: 
sample-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0052c2808 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-08-24 12:48:02 +0000 UTC,LastTransitionTime:2023-08-24 12:48:02 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rolling-update-deployment-7549d9f46d" has successfully progressed.,LastUpdateTime:2023-08-24 12:48:03 +0000 UTC,LastTransitionTime:2023-08-24 12:48:02 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Aug 24 12:48:04.439: INFO: New ReplicaSet "test-rolling-update-deployment-7549d9f46d" of Deployment "test-rolling-update-deployment": +&ReplicaSet{ObjectMeta:{test-rolling-update-deployment-7549d9f46d deployment-5338 4ffbda43-b586-4c57-b462-89db37ba49d7 26198 1 2023-08-24 12:48:02 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305833] [{apps/v1 Deployment test-rolling-update-deployment 50aaa261-1616-4a69-8e90-551739a91f7a 0xc0052c2ce7 0xc0052c2ce8}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:48:02 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"50aaa261-1616-4a69-8e90-551739a91f7a\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} 
status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 7549d9f46d,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0052c2d98 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:48:04.439: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": +Aug 24 12:48:04.439: INFO: &ReplicaSet{ObjectMeta:{test-rolling-update-controller deployment-5338 b678bb20-8ec6-4e6c-b5b0-51e8bcb847a6 26208 2 2023-08-24 12:47:57 +0000 UTC map[name:sample-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305832] [{apps/v1 Deployment test-rolling-update-deployment 50aaa261-1616-4a69-8e90-551739a91f7a 0xc0052c2bb7 0xc0052c2bb8}] [] [{e2e.test Update apps/v1 2023-08-24 12:47:57 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"50aaa261-1616-4a69-8e90-551739a91f7a\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc0052c2c78 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil 
default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:48:04.452: INFO: Pod "test-rolling-update-deployment-7549d9f46d-n5msk" is available: +&Pod{ObjectMeta:{test-rolling-update-deployment-7549d9f46d-n5msk test-rolling-update-deployment-7549d9f46d- deployment-5338 c801e47d-ac07-4a69-b827-099b42966883 26197 0 2023-08-24 12:48:02 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[] [{apps/v1 ReplicaSet test-rolling-update-deployment-7549d9f46d 4ffbda43-b586-4c57-b462-89db37ba49d7 0xc00539ccc7 0xc00539ccc8}] [] [{kube-controller-manager Update v1 2023-08-24 12:48:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4ffbda43-b586-4c57-b462-89db37ba49d7\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.36\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-5zllj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5zllj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:48:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:48:03 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:48:03 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:48:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.36,StartTime:2023-08-24 12:48:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:48:03 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,ImageID:registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e,ContainerID:cri-o://01a1d07d83574a93cceeef0ea3a53416ff810474030028cde566625d60d2667c,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.36,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 -Jul 29 16:40:10.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 12:48:04.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-5145" for this suite. 07/29/23 16:40:10.591 +STEP: Destroying namespace "deployment-5338" for this suite. 
08/24/23 12:48:04.463 ------------------------------ -• [4.168 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:89 +• [SLOW TEST] [7.193 seconds] +[sig-apps] Deployment +test/e2e/apps/framework.go:23 + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + test/e2e/apps/deployment.go:105 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:40:06.434 - Jul 29 16:40:06.434: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:40:06.436 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:06.468 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:06.473 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 12:47:57.289 + Aug 24 12:47:57.289: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename deployment 08/24/23 12:47:57.291 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:47:57.317 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:47:57.323 + [BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:89 - STEP: Creating configMap with name configmap-test-volume-map-dab979ed-c44e-4eb6-979c-c8c5dc0eb5ad 07/29/23 16:40:06.478 - STEP: Creating a pod to test consume configMaps 07/29/23 16:40:06.491 - Jul 29 16:40:06.513: INFO: Waiting up to 5m0s for pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81" in namespace "configmap-5145" to be "Succeeded or Failed" - Jul 29 16:40:06.530: INFO: Pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81": Phase="Pending", Reason="", readiness=false. Elapsed: 17.28753ms - Jul 29 16:40:08.542: INFO: Pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81": Phase="Pending", Reason="", readiness=false. Elapsed: 2.029355405s - Jul 29 16:40:10.537: INFO: Pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02409303s - STEP: Saw pod success 07/29/23 16:40:10.537 - Jul 29 16:40:10.538: INFO: Pod "pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81" satisfied condition "Succeeded or Failed" - Jul 29 16:40:10.543: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81 container agnhost-container: - STEP: delete the pod 07/29/23 16:40:10.555 - Jul 29 16:40:10.577: INFO: Waiting for pod pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81 to disappear - Jul 29 16:40:10.583: INFO: Pod pod-configmaps-7160c0df-61f3-4ca6-bcfc-f5f1c5c47a81 no longer exists - [AfterEach] [sig-storage] ConfigMap + [BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 + [It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] + test/e2e/apps/deployment.go:105 + Aug 24 12:47:57.329: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted) + Aug 24 12:47:57.348: INFO: Pod name sample-pod: Found 0 pods out of 1 + Aug 24 12:48:02.358: INFO: Pod name sample-pod: Found 1 pods out of 1 + STEP: ensuring each pod is running 08/24/23 12:48:02.359 + Aug 24 12:48:02.359: INFO: Creating deployment "test-rolling-update-deployment" + Aug 24 12:48:02.374: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has + Aug 24 12:48:02.390: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created + Aug 24 12:48:04.404: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected + Aug 24 12:48:04.411: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted) + [AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 + Aug 24 12:48:04.432: INFO: Deployment "test-rolling-update-deployment": + &Deployment{ObjectMeta:{test-rolling-update-deployment deployment-5338 50aaa261-1616-4a69-8e90-551739a91f7a 26209 1 2023-08-24 12:48:02 +0000 UTC map[name:sample-pod] map[deployment.kubernetes.io/revision:3546343826724305833] [] [] [{e2e.test Update apps/v1 2023-08-24 12:48:02 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: 
sample-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0052c2808 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-08-24 12:48:02 +0000 UTC,LastTransitionTime:2023-08-24 12:48:02 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rolling-update-deployment-7549d9f46d" has successfully progressed.,LastUpdateTime:2023-08-24 12:48:03 +0000 UTC,LastTransitionTime:2023-08-24 12:48:02 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + + Aug 24 12:48:04.439: INFO: New ReplicaSet "test-rolling-update-deployment-7549d9f46d" of Deployment "test-rolling-update-deployment": + &ReplicaSet{ObjectMeta:{test-rolling-update-deployment-7549d9f46d deployment-5338 4ffbda43-b586-4c57-b462-89db37ba49d7 26198 1 2023-08-24 12:48:02 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305833] [{apps/v1 Deployment test-rolling-update-deployment 50aaa261-1616-4a69-8e90-551739a91f7a 0xc0052c2ce7 0xc0052c2ce8}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:48:02 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"50aaa261-1616-4a69-8e90-551739a91f7a\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} 
status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 7549d9f46d,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0052c2d98 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:48:04.439: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": + Aug 24 12:48:04.439: INFO: &ReplicaSet{ObjectMeta:{test-rolling-update-controller deployment-5338 b678bb20-8ec6-4e6c-b5b0-51e8bcb847a6 26208 2 2023-08-24 12:47:57 +0000 UTC map[name:sample-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305832] [{apps/v1 Deployment test-rolling-update-deployment 50aaa261-1616-4a69-8e90-551739a91f7a 0xc0052c2bb7 0xc0052c2bb8}] [] [{e2e.test Update apps/v1 2023-08-24 12:47:57 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"50aaa261-1616-4a69-8e90-551739a91f7a\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc0052c2c78 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil 
default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:48:04.452: INFO: Pod "test-rolling-update-deployment-7549d9f46d-n5msk" is available: + &Pod{ObjectMeta:{test-rolling-update-deployment-7549d9f46d-n5msk test-rolling-update-deployment-7549d9f46d- deployment-5338 c801e47d-ac07-4a69-b827-099b42966883 26197 0 2023-08-24 12:48:02 +0000 UTC map[name:sample-pod pod-template-hash:7549d9f46d] map[] [{apps/v1 ReplicaSet test-rolling-update-deployment-7549d9f46d 4ffbda43-b586-4c57-b462-89db37ba49d7 0xc00539ccc7 0xc00539ccc8}] [] [{kube-controller-manager Update v1 2023-08-24 12:48:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"4ffbda43-b586-4c57-b462-89db37ba49d7\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:48:03 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.36\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-5zllj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5zllj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:48:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:48:03 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:48:03 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:48:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.36,StartTime:2023-08-24 12:48:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:48:03 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,ImageID:registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e,ContainerID:cri-o://01a1d07d83574a93cceeef0ea3a53416ff810474030028cde566625d60d2667c,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.36,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + [AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 - Jul 29 16:40:10.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 12:48:04.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-5145" for this suite. 07/29/23 16:40:10.591 + STEP: Destroying namespace "deployment-5338" for this suite. 
08/24/23 12:48:04.463 << End Captured GinkgoWriter Output ------------------------------ -S +SS ------------------------------ [sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance] test/e2e/kubectl/kubectl.go:1652 [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:40:10.603 -Jul 29 16:40:10.603: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:40:10.604 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:10.63 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:10.634 +STEP: Creating a kubernetes client 08/24/23 12:48:04.484 +Aug 24 12:48:04.484: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:48:04.487 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:04.519 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:04.524 [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-cli] Kubectl client test/e2e/kubectl/kubectl.go:274 [It] should add annotations for pods in rc [Conformance] test/e2e/kubectl/kubectl.go:1652 -STEP: creating Agnhost RC 07/29/23 16:40:10.638 -Jul 29 16:40:10.639: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1939 create -f -' -Jul 29 16:40:12.524: INFO: stderr: "" -Jul 29 16:40:12.524: INFO: stdout: "replicationcontroller/agnhost-primary created\n" -STEP: Waiting for Agnhost primary to start. 07/29/23 16:40:12.524 -Jul 29 16:40:13.534: INFO: Selector matched 1 pods for map[app:agnhost] -Jul 29 16:40:13.535: INFO: Found 0 / 1 -Jul 29 16:40:14.539: INFO: Selector matched 1 pods for map[app:agnhost] -Jul 29 16:40:14.539: INFO: Found 1 / 1 -Jul 29 16:40:14.539: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 -STEP: patching all pods 07/29/23 16:40:14.539 -Jul 29 16:40:14.546: INFO: Selector matched 1 pods for map[app:agnhost] -Jul 29 16:40:14.546: INFO: ForEach: Found 1 pods from the filter. Now looping through them. -Jul 29 16:40:14.546: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1939 patch pod agnhost-primary-z8ddx -p {"metadata":{"annotations":{"x":"y"}}}' -Jul 29 16:40:14.711: INFO: stderr: "" -Jul 29 16:40:14.711: INFO: stdout: "pod/agnhost-primary-z8ddx patched\n" -STEP: checking annotations 07/29/23 16:40:14.711 -Jul 29 16:40:14.718: INFO: Selector matched 1 pods for map[app:agnhost] -Jul 29 16:40:14.718: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +STEP: creating Agnhost RC 08/24/23 12:48:04.528 +Aug 24 12:48:04.529: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-4654 create -f -' +Aug 24 12:48:05.870: INFO: stderr: "" +Aug 24 12:48:05.870: INFO: stdout: "replicationcontroller/agnhost-primary created\n" +STEP: Waiting for Agnhost primary to start. 08/24/23 12:48:05.87 +Aug 24 12:48:06.880: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:48:06.880: INFO: Found 0 / 1 +Aug 24 12:48:07.878: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:48:07.878: INFO: Found 1 / 1 +Aug 24 12:48:07.879: INFO: WaitFor completed with timeout 5m0s. 
Pods found = 1 out of 1 +STEP: patching all pods 08/24/23 12:48:07.879 +Aug 24 12:48:07.885: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:48:07.885: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +Aug 24 12:48:07.886: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-4654 patch pod agnhost-primary-chc95 -p {"metadata":{"annotations":{"x":"y"}}}' +Aug 24 12:48:08.102: INFO: stderr: "" +Aug 24 12:48:08.102: INFO: stdout: "pod/agnhost-primary-chc95 patched\n" +STEP: checking annotations 08/24/23 12:48:08.102 +Aug 24 12:48:08.109: INFO: Selector matched 1 pods for map[app:agnhost] +Aug 24 12:48:08.109: INFO: ForEach: Found 1 pods from the filter. Now looping through them. [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 16:40:14.718: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:48:08.109: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-1939" for this suite. 07/29/23 16:40:14.728 +STEP: Destroying namespace "kubectl-4654" for this suite. 08/24/23 12:48:08.117 ------------------------------ -• [4.137 seconds] +• [3.647 seconds] [sig-cli] Kubectl client test/e2e/kubectl/framework.go:23 Kubectl patch @@ -24598,4964 +23762,5694 @@ test/e2e/kubectl/framework.go:23 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:40:10.603 - Jul 29 16:40:10.603: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 16:40:10.604 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:10.63 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:10.634 + STEP: Creating a kubernetes client 08/24/23 12:48:04.484 + Aug 24 12:48:04.484: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:48:04.487 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:04.519 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:04.524 [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-cli] Kubectl client test/e2e/kubectl/kubectl.go:274 [It] should add annotations for pods in rc [Conformance] test/e2e/kubectl/kubectl.go:1652 - STEP: creating Agnhost RC 07/29/23 16:40:10.638 - Jul 29 16:40:10.639: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1939 create -f -' - Jul 29 16:40:12.524: INFO: stderr: "" - Jul 29 16:40:12.524: INFO: stdout: "replicationcontroller/agnhost-primary created\n" - STEP: Waiting for Agnhost primary to start. 07/29/23 16:40:12.524 - Jul 29 16:40:13.534: INFO: Selector matched 1 pods for map[app:agnhost] - Jul 29 16:40:13.535: INFO: Found 0 / 1 - Jul 29 16:40:14.539: INFO: Selector matched 1 pods for map[app:agnhost] - Jul 29 16:40:14.539: INFO: Found 1 / 1 - Jul 29 16:40:14.539: INFO: WaitFor completed with timeout 5m0s. 
Pods found = 1 out of 1 - STEP: patching all pods 07/29/23 16:40:14.539 - Jul 29 16:40:14.546: INFO: Selector matched 1 pods for map[app:agnhost] - Jul 29 16:40:14.546: INFO: ForEach: Found 1 pods from the filter. Now looping through them. - Jul 29 16:40:14.546: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1939 patch pod agnhost-primary-z8ddx -p {"metadata":{"annotations":{"x":"y"}}}' - Jul 29 16:40:14.711: INFO: stderr: "" - Jul 29 16:40:14.711: INFO: stdout: "pod/agnhost-primary-z8ddx patched\n" - STEP: checking annotations 07/29/23 16:40:14.711 - Jul 29 16:40:14.718: INFO: Selector matched 1 pods for map[app:agnhost] - Jul 29 16:40:14.718: INFO: ForEach: Found 1 pods from the filter. Now looping through them. + STEP: creating Agnhost RC 08/24/23 12:48:04.528 + Aug 24 12:48:04.529: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-4654 create -f -' + Aug 24 12:48:05.870: INFO: stderr: "" + Aug 24 12:48:05.870: INFO: stdout: "replicationcontroller/agnhost-primary created\n" + STEP: Waiting for Agnhost primary to start. 08/24/23 12:48:05.87 + Aug 24 12:48:06.880: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:48:06.880: INFO: Found 0 / 1 + Aug 24 12:48:07.878: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:48:07.878: INFO: Found 1 / 1 + Aug 24 12:48:07.879: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 + STEP: patching all pods 08/24/23 12:48:07.879 + Aug 24 12:48:07.885: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:48:07.885: INFO: ForEach: Found 1 pods from the filter. Now looping through them. + Aug 24 12:48:07.886: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-4654 patch pod agnhost-primary-chc95 -p {"metadata":{"annotations":{"x":"y"}}}' + Aug 24 12:48:08.102: INFO: stderr: "" + Aug 24 12:48:08.102: INFO: stdout: "pod/agnhost-primary-chc95 patched\n" + STEP: checking annotations 08/24/23 12:48:08.102 + Aug 24 12:48:08.109: INFO: Selector matched 1 pods for map[app:agnhost] + Aug 24 12:48:08.109: INFO: ForEach: Found 1 pods from the filter. Now looping through them. [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 16:40:14.718: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:48:08.109: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-1939" for this suite. 07/29/23 16:40:14.728 + STEP: Destroying namespace "kubectl-4654" for this suite. 
08/24/23 12:48:08.117 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:197 -[BeforeEach] [sig-storage] EmptyDir volumes +[sig-api-machinery] Servers with support for Table transformation + should return a 406 for a backend which does not implement metadata [Conformance] + test/e2e/apimachinery/table_conversion.go:154 +[BeforeEach] [sig-api-machinery] Servers with support for Table transformation set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:40:14.741 -Jul 29 16:40:14.741: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 16:40:14.744 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:14.774 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:14.779 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 12:48:08.136 +Aug 24 12:48:08.136: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename tables 08/24/23 12:48:08.142 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:08.175 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:08.187 +[BeforeEach] [sig-api-machinery] Servers with support for Table transformation test/e2e/framework/metrics/init/init.go:31 -[It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:197 -STEP: Creating a pod to test emptydir 0644 on node default medium 07/29/23 16:40:14.784 -Jul 29 16:40:14.803: INFO: Waiting up to 5m0s for pod "pod-50d32a20-8705-426d-83dc-deadc8e99951" in namespace "emptydir-7780" to be "Succeeded or Failed" -Jul 29 16:40:14.816: INFO: Pod "pod-50d32a20-8705-426d-83dc-deadc8e99951": Phase="Pending", Reason="", readiness=false. Elapsed: 12.848495ms -Jul 29 16:40:16.826: INFO: Pod "pod-50d32a20-8705-426d-83dc-deadc8e99951": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022462062s -Jul 29 16:40:18.823: INFO: Pod "pod-50d32a20-8705-426d-83dc-deadc8e99951": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.019508029s -STEP: Saw pod success 07/29/23 16:40:18.823 -Jul 29 16:40:18.823: INFO: Pod "pod-50d32a20-8705-426d-83dc-deadc8e99951" satisfied condition "Succeeded or Failed" -Jul 29 16:40:18.827: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-50d32a20-8705-426d-83dc-deadc8e99951 container test-container: -STEP: delete the pod 07/29/23 16:40:18.841 -Jul 29 16:40:18.916: INFO: Waiting for pod pod-50d32a20-8705-426d-83dc-deadc8e99951 to disappear -Jul 29 16:40:18.922: INFO: Pod pod-50d32a20-8705-426d-83dc-deadc8e99951 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-api-machinery] Servers with support for Table transformation + test/e2e/apimachinery/table_conversion.go:49 +[It] should return a 406 for a backend which does not implement metadata [Conformance] + test/e2e/apimachinery/table_conversion.go:154 +[AfterEach] [sig-api-machinery] Servers with support for Table transformation test/e2e/framework/node/init/init.go:32 -Jul 29 16:40:18.922: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +Aug 24 12:48:08.197: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-7780" for this suite. 07/29/23 16:40:18.929 +STEP: Destroying namespace "tables-4716" for this suite. 
08/24/23 12:48:08.209 ------------------------------ -• [4.200 seconds] -[sig-storage] EmptyDir volumes -test/e2e/common/storage/framework.go:23 - should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:197 +• [0.091 seconds] +[sig-api-machinery] Servers with support for Table transformation +test/e2e/apimachinery/framework.go:23 + should return a 406 for a backend which does not implement metadata [Conformance] + test/e2e/apimachinery/table_conversion.go:154 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-api-machinery] Servers with support for Table transformation set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:40:14.741 - Jul 29 16:40:14.741: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 16:40:14.744 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:14.774 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:14.779 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 12:48:08.136 + Aug 24 12:48:08.136: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename tables 08/24/23 12:48:08.142 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:08.175 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:08.187 + [BeforeEach] [sig-api-machinery] Servers with support for Table transformation test/e2e/framework/metrics/init/init.go:31 - [It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:197 - STEP: Creating a pod to test emptydir 0644 on node default medium 07/29/23 16:40:14.784 - Jul 29 16:40:14.803: INFO: Waiting up to 5m0s for pod "pod-50d32a20-8705-426d-83dc-deadc8e99951" in namespace "emptydir-7780" to be "Succeeded or Failed" - Jul 29 16:40:14.816: INFO: Pod "pod-50d32a20-8705-426d-83dc-deadc8e99951": Phase="Pending", Reason="", readiness=false. Elapsed: 12.848495ms - Jul 29 16:40:16.826: INFO: Pod "pod-50d32a20-8705-426d-83dc-deadc8e99951": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022462062s - Jul 29 16:40:18.823: INFO: Pod "pod-50d32a20-8705-426d-83dc-deadc8e99951": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.019508029s - STEP: Saw pod success 07/29/23 16:40:18.823 - Jul 29 16:40:18.823: INFO: Pod "pod-50d32a20-8705-426d-83dc-deadc8e99951" satisfied condition "Succeeded or Failed" - Jul 29 16:40:18.827: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-50d32a20-8705-426d-83dc-deadc8e99951 container test-container: - STEP: delete the pod 07/29/23 16:40:18.841 - Jul 29 16:40:18.916: INFO: Waiting for pod pod-50d32a20-8705-426d-83dc-deadc8e99951 to disappear - Jul 29 16:40:18.922: INFO: Pod pod-50d32a20-8705-426d-83dc-deadc8e99951 no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-api-machinery] Servers with support for Table transformation + test/e2e/apimachinery/table_conversion.go:49 + [It] should return a 406 for a backend which does not implement metadata [Conformance] + test/e2e/apimachinery/table_conversion.go:154 + [AfterEach] [sig-api-machinery] Servers with support for Table transformation test/e2e/framework/node/init/init.go:32 - Jul 29 16:40:18.922: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 12:48:08.197: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-api-machinery] Servers with support for Table transformation tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-7780" for this suite. 07/29/23 16:40:18.929 + STEP: Destroying namespace "tables-4716" for this suite. 
08/24/23 12:48:08.209 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SS ------------------------------ -[sig-storage] Projected downwardAPI - should update labels on modification [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:130 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-api-machinery] Namespaces [Serial] + should apply a finalizer to a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:394 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:40:18.95 -Jul 29 16:40:18.950: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:40:18.952 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:18.977 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:18.981 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 12:48:08.228 +Aug 24 12:48:08.228: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename namespaces 08/24/23 12:48:08.23 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:08.258 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:08.269 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should update labels on modification [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:130 -STEP: Creating the pod 07/29/23 16:40:18.985 -Jul 29 16:40:19.000: INFO: Waiting up to 5m0s for pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7" in namespace "projected-3257" to be "running and ready" -Jul 29 16:40:19.019: INFO: Pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7": Phase="Pending", Reason="", readiness=false. Elapsed: 18.870949ms -Jul 29 16:40:19.019: INFO: The phase of Pod labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:40:21.026: INFO: Pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.026362326s -Jul 29 16:40:21.027: INFO: The phase of Pod labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7 is Running (Ready = true) -Jul 29 16:40:21.027: INFO: Pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7" satisfied condition "running and ready" -Jul 29 16:40:21.570: INFO: Successfully updated pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7" -[AfterEach] [sig-storage] Projected downwardAPI +[It] should apply a finalizer to a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:394 +STEP: Creating namespace "e2e-ns-rpx6b" 08/24/23 12:48:08.274 +Aug 24 12:48:08.306: INFO: Namespace "e2e-ns-rpx6b-5069" has []v1.FinalizerName{"kubernetes"} +STEP: Adding e2e finalizer to namespace "e2e-ns-rpx6b-5069" 08/24/23 12:48:08.306 +Aug 24 12:48:08.324: INFO: Namespace "e2e-ns-rpx6b-5069" has []v1.FinalizerName{"kubernetes", "e2e.example.com/fakeFinalizer"} +STEP: Removing e2e finalizer from namespace "e2e-ns-rpx6b-5069" 08/24/23 12:48:08.325 +Aug 24 12:48:08.343: INFO: Namespace "e2e-ns-rpx6b-5069" has []v1.FinalizerName{"kubernetes"} +[AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:40:25.630: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 12:48:08.344: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + tear down framework | framework.go:193 +STEP: Destroying namespace "namespaces-4251" for this suite. 08/24/23 12:48:08.357 +STEP: Destroying namespace "e2e-ns-rpx6b-5069" for this suite. 
08/24/23 12:48:08.369 +------------------------------ +• [0.154 seconds] +[sig-api-machinery] Namespaces [Serial] +test/e2e/apimachinery/framework.go:23 + should apply a finalizer to a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:394 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-api-machinery] Namespaces [Serial] + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:48:08.228 + Aug 24 12:48:08.228: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename namespaces 08/24/23 12:48:08.23 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:08.258 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:08.269 + [BeforeEach] [sig-api-machinery] Namespaces [Serial] + test/e2e/framework/metrics/init/init.go:31 + [It] should apply a finalizer to a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:394 + STEP: Creating namespace "e2e-ns-rpx6b" 08/24/23 12:48:08.274 + Aug 24 12:48:08.306: INFO: Namespace "e2e-ns-rpx6b-5069" has []v1.FinalizerName{"kubernetes"} + STEP: Adding e2e finalizer to namespace "e2e-ns-rpx6b-5069" 08/24/23 12:48:08.306 + Aug 24 12:48:08.324: INFO: Namespace "e2e-ns-rpx6b-5069" has []v1.FinalizerName{"kubernetes", "e2e.example.com/fakeFinalizer"} + STEP: Removing e2e finalizer from namespace "e2e-ns-rpx6b-5069" 08/24/23 12:48:08.325 + Aug 24 12:48:08.343: INFO: Namespace "e2e-ns-rpx6b-5069" has []v1.FinalizerName{"kubernetes"} + [AfterEach] [sig-api-machinery] Namespaces [Serial] + test/e2e/framework/node/init/init.go:32 + Aug 24 12:48:08.344: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + tear down framework | framework.go:193 + STEP: Destroying namespace "namespaces-4251" for this suite. 08/24/23 12:48:08.357 + STEP: Destroying namespace "e2e-ns-rpx6b-5069" for this suite. 08/24/23 12:48:08.369 + << End Captured GinkgoWriter Output +------------------------------ +SSS +------------------------------ +[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop http hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:212 +[BeforeEach] [sig-node] Container Lifecycle Hook + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:48:08.383 +Aug 24 12:48:08.383: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-lifecycle-hook 08/24/23 12:48:08.385 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:08.41 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:08.415 +[BeforeEach] [sig-node] Container Lifecycle Hook + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:77 +STEP: create the container to handle the HTTPGet hook request. 08/24/23 12:48:08.43 +Aug 24 12:48:08.447: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-5460" to be "running and ready" +Aug 24 12:48:08.456: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.64686ms +Aug 24 12:48:08.456: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:48:10.463: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 2.015349344s +Aug 24 12:48:10.463: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) +Aug 24 12:48:10.463: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" +[It] should execute prestop http hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:212 +STEP: create the pod with lifecycle hook 08/24/23 12:48:10.469 +Aug 24 12:48:10.480: INFO: Waiting up to 5m0s for pod "pod-with-prestop-http-hook" in namespace "container-lifecycle-hook-5460" to be "running and ready" +Aug 24 12:48:10.486: INFO: Pod "pod-with-prestop-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 6.075808ms +Aug 24 12:48:10.486: INFO: The phase of Pod pod-with-prestop-http-hook is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:48:12.500: INFO: Pod "pod-with-prestop-http-hook": Phase="Running", Reason="", readiness=true. Elapsed: 2.01999678s +Aug 24 12:48:12.500: INFO: The phase of Pod pod-with-prestop-http-hook is Running (Ready = true) +Aug 24 12:48:12.500: INFO: Pod "pod-with-prestop-http-hook" satisfied condition "running and ready" +STEP: delete the pod with lifecycle hook 08/24/23 12:48:12.505 +Aug 24 12:48:12.515: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Aug 24 12:48:12.522: INFO: Pod pod-with-prestop-http-hook still exists +Aug 24 12:48:14.523: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Aug 24 12:48:14.532: INFO: Pod pod-with-prestop-http-hook still exists +Aug 24 12:48:16.523: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Aug 24 12:48:16.532: INFO: Pod pod-with-prestop-http-hook no longer exists +STEP: check prestop hook 08/24/23 12:48:16.532 +[AfterEach] [sig-node] Container Lifecycle Hook + test/e2e/framework/node/init/init.go:32 +Aug 24 12:48:16.563: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + tear down framework | framework.go:193 +STEP: Destroying namespace "container-lifecycle-hook-5460" for this suite. 
08/24/23 12:48:16.573 +------------------------------ +• [SLOW TEST] [8.200 seconds] +[sig-node] Container Lifecycle Hook +test/e2e/common/node/framework.go:23 + when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:46 + should execute prestop http hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:212 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-node] Container Lifecycle Hook + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:48:08.383 + Aug 24 12:48:08.383: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-lifecycle-hook 08/24/23 12:48:08.385 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:08.41 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:08.415 + [BeforeEach] [sig-node] Container Lifecycle Hook + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] when create a pod with lifecycle hook + test/e2e/common/node/lifecycle_hook.go:77 + STEP: create the container to handle the HTTPGet hook request. 08/24/23 12:48:08.43 + Aug 24 12:48:08.447: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-5460" to be "running and ready" + Aug 24 12:48:08.456: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 8.64686ms + Aug 24 12:48:08.456: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:48:10.463: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. Elapsed: 2.015349344s + Aug 24 12:48:10.463: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) + Aug 24 12:48:10.463: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" + [It] should execute prestop http hook properly [NodeConformance] [Conformance] + test/e2e/common/node/lifecycle_hook.go:212 + STEP: create the pod with lifecycle hook 08/24/23 12:48:10.469 + Aug 24 12:48:10.480: INFO: Waiting up to 5m0s for pod "pod-with-prestop-http-hook" in namespace "container-lifecycle-hook-5460" to be "running and ready" + Aug 24 12:48:10.486: INFO: Pod "pod-with-prestop-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 6.075808ms + Aug 24 12:48:10.486: INFO: The phase of Pod pod-with-prestop-http-hook is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:48:12.500: INFO: Pod "pod-with-prestop-http-hook": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01999678s + Aug 24 12:48:12.500: INFO: The phase of Pod pod-with-prestop-http-hook is Running (Ready = true) + Aug 24 12:48:12.500: INFO: Pod "pod-with-prestop-http-hook" satisfied condition "running and ready" + STEP: delete the pod with lifecycle hook 08/24/23 12:48:12.505 + Aug 24 12:48:12.515: INFO: Waiting for pod pod-with-prestop-http-hook to disappear + Aug 24 12:48:12.522: INFO: Pod pod-with-prestop-http-hook still exists + Aug 24 12:48:14.523: INFO: Waiting for pod pod-with-prestop-http-hook to disappear + Aug 24 12:48:14.532: INFO: Pod pod-with-prestop-http-hook still exists + Aug 24 12:48:16.523: INFO: Waiting for pod pod-with-prestop-http-hook to disappear + Aug 24 12:48:16.532: INFO: Pod pod-with-prestop-http-hook no longer exists + STEP: check prestop hook 08/24/23 12:48:16.532 + [AfterEach] [sig-node] Container Lifecycle Hook + test/e2e/framework/node/init/init.go:32 + Aug 24 12:48:16.563: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + tear down framework | framework.go:193 + STEP: Destroying namespace "container-lifecycle-hook-5460" for this suite. 08/24/23 12:48:16.573 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] CronJob + should support CronJob API operations [Conformance] + test/e2e/apps/cronjob.go:319 +[BeforeEach] [sig-apps] CronJob + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:48:16.587 +Aug 24 12:48:16.587: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename cronjob 08/24/23 12:48:16.591 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:16.62 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:16.624 +[BeforeEach] [sig-apps] CronJob + test/e2e/framework/metrics/init/init.go:31 +[It] should support CronJob API operations [Conformance] + test/e2e/apps/cronjob.go:319 +STEP: Creating a cronjob 08/24/23 12:48:16.629 +STEP: creating 08/24/23 12:48:16.629 +STEP: getting 08/24/23 12:48:16.641 +STEP: listing 08/24/23 12:48:16.646 +STEP: watching 08/24/23 12:48:16.651 +Aug 24 12:48:16.652: INFO: starting watch +STEP: cluster-wide listing 08/24/23 12:48:16.653 +STEP: cluster-wide watching 08/24/23 12:48:16.658 +Aug 24 12:48:16.659: INFO: starting watch +STEP: patching 08/24/23 12:48:16.661 +STEP: updating 08/24/23 12:48:16.671 +Aug 24 12:48:16.683: INFO: waiting for watch events with expected annotations +Aug 24 12:48:16.684: INFO: saw patched and updated annotations +STEP: patching /status 08/24/23 12:48:16.684 +STEP: updating /status 08/24/23 12:48:16.696 +STEP: get /status 08/24/23 12:48:16.711 +STEP: deleting 08/24/23 12:48:16.719 +STEP: deleting a collection 08/24/23 12:48:16.748 +[AfterEach] [sig-apps] CronJob + test/e2e/framework/node/init/init.go:32 +Aug 24 12:48:16.772: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] CronJob + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-apps] CronJob + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 -STEP: Destroying 
namespace "projected-3257" for this suite. 07/29/23 16:40:25.639 +STEP: Destroying namespace "cronjob-2986" for this suite. 08/24/23 12:48:16.779 ------------------------------ -• [SLOW TEST] [6.704 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should update labels on modification [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:130 +• [0.225 seconds] +[sig-apps] CronJob +test/e2e/apps/framework.go:23 + should support CronJob API operations [Conformance] + test/e2e/apps/cronjob.go:319 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:40:18.95 - Jul 29 16:40:18.950: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:40:18.952 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:18.977 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:18.981 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 12:48:16.587 + Aug 24 12:48:16.587: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename cronjob 08/24/23 12:48:16.591 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:16.62 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:16.624 + [BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should update labels on modification [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:130 - STEP: Creating the pod 07/29/23 16:40:18.985 - Jul 29 16:40:19.000: INFO: Waiting up to 5m0s for pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7" in namespace "projected-3257" to be "running and ready" - Jul 29 16:40:19.019: INFO: Pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7": Phase="Pending", Reason="", readiness=false. Elapsed: 18.870949ms - Jul 29 16:40:19.019: INFO: The phase of Pod labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:40:21.026: INFO: Pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.026362326s - Jul 29 16:40:21.027: INFO: The phase of Pod labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7 is Running (Ready = true) - Jul 29 16:40:21.027: INFO: Pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7" satisfied condition "running and ready" - Jul 29 16:40:21.570: INFO: Successfully updated pod "labelsupdate2a6ccef9-e752-4352-9ce0-c4789e356ef7" - [AfterEach] [sig-storage] Projected downwardAPI + [It] should support CronJob API operations [Conformance] + test/e2e/apps/cronjob.go:319 + STEP: Creating a cronjob 08/24/23 12:48:16.629 + STEP: creating 08/24/23 12:48:16.629 + STEP: getting 08/24/23 12:48:16.641 + STEP: listing 08/24/23 12:48:16.646 + STEP: watching 08/24/23 12:48:16.651 + Aug 24 12:48:16.652: INFO: starting watch + STEP: cluster-wide listing 08/24/23 12:48:16.653 + STEP: cluster-wide watching 08/24/23 12:48:16.658 + Aug 24 12:48:16.659: INFO: starting watch + STEP: patching 08/24/23 12:48:16.661 + STEP: updating 08/24/23 12:48:16.671 + Aug 24 12:48:16.683: INFO: waiting for watch events with expected annotations + Aug 24 12:48:16.684: INFO: saw patched and updated annotations + STEP: patching /status 08/24/23 12:48:16.684 + STEP: updating /status 08/24/23 12:48:16.696 + STEP: get /status 08/24/23 12:48:16.711 + STEP: deleting 08/24/23 12:48:16.719 + STEP: deleting a collection 08/24/23 12:48:16.748 + [AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 - Jul 29 16:40:25.630: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 12:48:16.772: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 - STEP: Destroying namespace "projected-3257" for this suite. 07/29/23 16:40:25.639 + STEP: Destroying namespace "cronjob-2986" for this suite. 
08/24/23 12:48:16.779 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-storage] EmptyDir volumes - should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:177 -[BeforeEach] [sig-storage] EmptyDir volumes +[sig-storage] Projected configMap + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:74 +[BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:40:25.66 -Jul 29 16:40:25.660: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 16:40:25.662 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:25.694 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:25.698 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 12:48:16.812 +Aug 24 12:48:16.812: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:48:16.814 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:16.839 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:16.844 +[BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 -[It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:177 -STEP: Creating a pod to test emptydir 0666 on node default medium 07/29/23 16:40:25.702 -Jul 29 16:40:25.715: INFO: Waiting up to 5m0s for pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb" in namespace "emptydir-3740" to be "Succeeded or Failed" -Jul 29 16:40:25.724: INFO: Pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb": Phase="Pending", Reason="", readiness=false. Elapsed: 9.748391ms -Jul 29 16:40:27.732: INFO: Pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017759719s -Jul 29 16:40:29.732: INFO: Pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017763068s -STEP: Saw pod success 07/29/23 16:40:29.732 -Jul 29 16:40:29.733: INFO: Pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb" satisfied condition "Succeeded or Failed" -Jul 29 16:40:29.737: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-b1103566-28c4-4e8c-a11b-c0594332dffb container test-container: -STEP: delete the pod 07/29/23 16:40:29.75 -Jul 29 16:40:29.775: INFO: Waiting for pod pod-b1103566-28c4-4e8c-a11b-c0594332dffb to disappear -Jul 29 16:40:29.780: INFO: Pod pod-b1103566-28c4-4e8c-a11b-c0594332dffb no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:74 +STEP: Creating configMap with name projected-configmap-test-volume-01fa1198-388a-430e-8e02-af5f9dc6b8e3 08/24/23 12:48:16.848 +STEP: Creating a pod to test consume configMaps 08/24/23 12:48:16.858 +Aug 24 12:48:16.878: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a" in namespace "projected-3319" to be "Succeeded or Failed" +Aug 24 12:48:16.893: INFO: Pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a": Phase="Pending", Reason="", readiness=false. Elapsed: 15.073579ms +Aug 24 12:48:18.902: INFO: Pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02358387s +Aug 24 12:48:20.901: INFO: Pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022798717s +STEP: Saw pod success 08/24/23 12:48:20.901 +Aug 24 12:48:20.902: INFO: Pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a" satisfied condition "Succeeded or Failed" +Aug 24 12:48:20.907: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a container agnhost-container: +STEP: delete the pod 08/24/23 12:48:20.92 +Aug 24 12:48:20.942: INFO: Waiting for pod pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a to disappear +Aug 24 12:48:20.948: INFO: Pod pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a no longer exists +[AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:40:29.780: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +Aug 24 12:48:20.948: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-3740" for this suite. 07/29/23 16:40:29.789 +STEP: Destroying namespace "projected-3319" for this suite. 
08/24/23 12:48:20.961 ------------------------------ -• [4.145 seconds] -[sig-storage] EmptyDir volumes +• [4.163 seconds] +[sig-storage] Projected configMap test/e2e/common/storage/framework.go:23 - should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:177 + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:74 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:40:25.66 - Jul 29 16:40:25.660: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 16:40:25.662 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:25.694 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:25.698 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 12:48:16.812 + Aug 24 12:48:16.812: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:48:16.814 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:16.839 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:16.844 + [BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 - [It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:177 - STEP: Creating a pod to test emptydir 0666 on node default medium 07/29/23 16:40:25.702 - Jul 29 16:40:25.715: INFO: Waiting up to 5m0s for pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb" in namespace "emptydir-3740" to be "Succeeded or Failed" - Jul 29 16:40:25.724: INFO: Pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb": Phase="Pending", Reason="", readiness=false. Elapsed: 9.748391ms - Jul 29 16:40:27.732: INFO: Pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017759719s - Jul 29 16:40:29.732: INFO: Pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017763068s - STEP: Saw pod success 07/29/23 16:40:29.732 - Jul 29 16:40:29.733: INFO: Pod "pod-b1103566-28c4-4e8c-a11b-c0594332dffb" satisfied condition "Succeeded or Failed" - Jul 29 16:40:29.737: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-b1103566-28c4-4e8c-a11b-c0594332dffb container test-container: - STEP: delete the pod 07/29/23 16:40:29.75 - Jul 29 16:40:29.775: INFO: Waiting for pod pod-b1103566-28c4-4e8c-a11b-c0594332dffb to disappear - Jul 29 16:40:29.780: INFO: Pod pod-b1103566-28c4-4e8c-a11b-c0594332dffb no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:74 + STEP: Creating configMap with name projected-configmap-test-volume-01fa1198-388a-430e-8e02-af5f9dc6b8e3 08/24/23 12:48:16.848 + STEP: Creating a pod to test consume configMaps 08/24/23 12:48:16.858 + Aug 24 12:48:16.878: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a" in namespace "projected-3319" to be "Succeeded or Failed" + Aug 24 12:48:16.893: INFO: Pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a": Phase="Pending", Reason="", readiness=false. Elapsed: 15.073579ms + Aug 24 12:48:18.902: INFO: Pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02358387s + Aug 24 12:48:20.901: INFO: Pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022798717s + STEP: Saw pod success 08/24/23 12:48:20.901 + Aug 24 12:48:20.902: INFO: Pod "pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a" satisfied condition "Succeeded or Failed" + Aug 24 12:48:20.907: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a container agnhost-container: + STEP: delete the pod 08/24/23 12:48:20.92 + Aug 24 12:48:20.942: INFO: Waiting for pod pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a to disappear + Aug 24 12:48:20.948: INFO: Pod pod-projected-configmaps-e98578c8-6ae3-47be-86dd-adac842ac64a no longer exists + [AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:40:29.780: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 12:48:20.948: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-3740" for this suite. 07/29/23 16:40:29.789 + STEP: Destroying namespace "projected-3319" for this suite. 
08/24/23 12:48:20.961 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------- [sig-network] DNS - should provide DNS for pods for Subdomain [Conformance] - test/e2e/network/dns.go:290 + should provide DNS for ExternalName services [Conformance] + test/e2e/network/dns.go:333 [BeforeEach] [sig-network] DNS set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:40:29.815 -Jul 29 16:40:29.815: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename dns 07/29/23 16:40:29.817 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:29.842 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:29.848 +STEP: Creating a kubernetes client 08/24/23 12:48:20.975 +Aug 24 12:48:20.975: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename dns 08/24/23 12:48:20.978 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:21.008 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:21.014 [BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 -[It] should provide DNS for pods for Subdomain [Conformance] - test/e2e/network/dns.go:290 -STEP: Creating a test headless service 07/29/23 16:40:29.854 -STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local;sleep 1; done - 07/29/23 16:40:29.864 -STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local;sleep 1; done - 07/29/23 16:40:29.864 -STEP: creating a pod to probe DNS 07/29/23 16:40:29.865 -STEP: submitting the pod to kubernetes 07/29/23 16:40:29.865 -Jul 29 16:40:29.888: INFO: Waiting up to 15m0s for pod "dns-test-88a74ab6-5956-493f-9ca7-42c690262615" in namespace 
"dns-2566" to be "running" -Jul 29 16:40:29.896: INFO: Pod "dns-test-88a74ab6-5956-493f-9ca7-42c690262615": Phase="Pending", Reason="", readiness=false. Elapsed: 7.509076ms -Jul 29 16:40:31.908: INFO: Pod "dns-test-88a74ab6-5956-493f-9ca7-42c690262615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020031497s -Jul 29 16:40:33.904: INFO: Pod "dns-test-88a74ab6-5956-493f-9ca7-42c690262615": Phase="Running", Reason="", readiness=true. Elapsed: 4.015662614s -Jul 29 16:40:33.904: INFO: Pod "dns-test-88a74ab6-5956-493f-9ca7-42c690262615" satisfied condition "running" -STEP: retrieving the pod 07/29/23 16:40:33.904 -STEP: looking for the results for each expected name from probers 07/29/23 16:40:33.91 -Jul 29 16:40:33.920: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:33.926: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:33.932: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:33.941: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:33.951: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:33.957: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:33.962: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:33.967: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:33.968: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local 
jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - -Jul 29 16:40:38.982: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:38.988: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:38.995: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:39.001: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:39.007: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:39.013: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:39.020: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:39.025: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:39.025: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - -Jul 29 16:40:43.983: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:43.990: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:43.998: INFO: Unable to read 
wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:44.006: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:44.012: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:44.018: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:44.025: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:44.034: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:44.034: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - -Jul 29 16:40:48.982: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:48.995: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:49.003: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:49.011: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:49.022: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find 
the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:49.032: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:49.042: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:49.056: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:49.058: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - -Jul 29 16:40:53.980: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:53.986: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:53.995: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:54.003: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:54.009: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:54.020: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:54.027: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:54.034: INFO: Unable to read 
jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:54.034: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - -Jul 29 16:40:58.980: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:58.988: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:58.994: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:59.000: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:59.007: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:59.013: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:59.020: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:59.025: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) -Jul 29 16:40:59.025: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local 
jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - -Jul 29 16:41:04.051: INFO: DNS probes using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 succeeded - -STEP: deleting the pod 07/29/23 16:41:04.052 -STEP: deleting the test headless service 07/29/23 16:41:04.084 +[It] should provide DNS for ExternalName services [Conformance] + test/e2e/network/dns.go:333 +STEP: Creating a test externalName service 08/24/23 12:48:21.02 +STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:21.031 +STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:21.032 +STEP: creating a pod to probe DNS 08/24/23 12:48:21.032 +STEP: submitting the pod to kubernetes 08/24/23 12:48:21.032 +Aug 24 12:48:21.061: INFO: Waiting up to 15m0s for pod "dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c" in namespace "dns-6893" to be "running" +Aug 24 12:48:21.074: INFO: Pod "dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c": Phase="Pending", Reason="", readiness=false. Elapsed: 13.687062ms +Aug 24 12:48:23.083: INFO: Pod "dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c": Phase="Running", Reason="", readiness=true. Elapsed: 2.022355005s +Aug 24 12:48:23.083: INFO: Pod "dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c" satisfied condition "running" +STEP: retrieving the pod 08/24/23 12:48:23.083 +STEP: looking for the results for each expected name from probers 08/24/23 12:48:23.089 +Aug 24 12:48:23.106: INFO: DNS probes using dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c succeeded + +STEP: deleting the pod 08/24/23 12:48:23.106 +STEP: changing the externalName to bar.example.com 08/24/23 12:48:23.128 +STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:23.15 +STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:23.15 +STEP: creating a second pod to probe DNS 08/24/23 12:48:23.15 +STEP: submitting the pod to kubernetes 08/24/23 12:48:23.151 +Aug 24 12:48:23.166: INFO: Waiting up to 15m0s for pod "dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3" in namespace "dns-6893" to be "running" +Aug 24 12:48:23.180: INFO: Pod "dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3": Phase="Pending", Reason="", readiness=false. Elapsed: 14.396638ms +Aug 24 12:48:25.193: INFO: Pod "dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3": Phase="Running", Reason="", readiness=true. Elapsed: 2.027037822s +Aug 24 12:48:25.193: INFO: Pod "dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3" satisfied condition "running" +STEP: retrieving the pod 08/24/23 12:48:25.193 +STEP: looking for the results for each expected name from probers 08/24/23 12:48:25.202 +Aug 24 12:48:25.220: INFO: File jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local from pod dns-6893/dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3 contains 'foo.example.com. 
+' instead of 'bar.example.com.' +Aug 24 12:48:25.220: INFO: Lookups using dns-6893/dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3 failed for: [jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local] + +Aug 24 12:48:30.237: INFO: DNS probes using dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3 succeeded + +STEP: deleting the pod 08/24/23 12:48:30.237 +STEP: changing the service to type=ClusterIP 08/24/23 12:48:30.257 +STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local A > /results/wheezy_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:30.331 +STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local A > /results/jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:30.331 +STEP: creating a third pod to probe DNS 08/24/23 12:48:30.332 +STEP: submitting the pod to kubernetes 08/24/23 12:48:30.339 +Aug 24 12:48:30.352: INFO: Waiting up to 15m0s for pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7" in namespace "dns-6893" to be "running" +Aug 24 12:48:30.363: INFO: Pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7": Phase="Pending", Reason="", readiness=false. Elapsed: 10.74265ms +Aug 24 12:48:32.372: INFO: Pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019965081s +Aug 24 12:48:34.374: INFO: Pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7": Phase="Running", Reason="", readiness=true. Elapsed: 4.02185716s +Aug 24 12:48:34.374: INFO: Pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7" satisfied condition "running" +STEP: retrieving the pod 08/24/23 12:48:34.374 +STEP: looking for the results for each expected name from probers 08/24/23 12:48:34.384 +Aug 24 12:48:34.405: INFO: DNS probes using dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7 succeeded + +STEP: deleting the pod 08/24/23 12:48:34.405 +STEP: deleting the test externalName service 08/24/23 12:48:34.431 [AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 -Jul 29 16:41:04.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:48:34.467: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 -STEP: Destroying namespace "dns-2566" for this suite. 07/29/23 16:41:04.176 +STEP: Destroying namespace "dns-6893" for this suite. 
08/24/23 12:48:34.477 ------------------------------ -• [SLOW TEST] [34.386 seconds] +• [SLOW TEST] [13.518 seconds] [sig-network] DNS test/e2e/network/common/framework.go:23 - should provide DNS for pods for Subdomain [Conformance] - test/e2e/network/dns.go:290 + should provide DNS for ExternalName services [Conformance] + test/e2e/network/dns.go:333 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-network] DNS set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:40:29.815 - Jul 29 16:40:29.815: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename dns 07/29/23 16:40:29.817 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:40:29.842 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:40:29.848 + STEP: Creating a kubernetes client 08/24/23 12:48:20.975 + Aug 24 12:48:20.975: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename dns 08/24/23 12:48:20.978 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:21.008 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:21.014 [BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 - [It] should provide DNS for pods for Subdomain [Conformance] - test/e2e/network/dns.go:290 - STEP: Creating a test headless service 07/29/23 16:40:29.854 - STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local;sleep 1; done - 07/29/23 16:40:29.864 - STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-2566.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local;sleep 1; done - 07/29/23 16:40:29.864 - STEP: creating a pod to probe DNS 07/29/23 16:40:29.865 - STEP: submitting the pod to kubernetes 07/29/23 16:40:29.865 - Jul 29 16:40:29.888: INFO: Waiting up to 15m0s for pod 
"dns-test-88a74ab6-5956-493f-9ca7-42c690262615" in namespace "dns-2566" to be "running" - Jul 29 16:40:29.896: INFO: Pod "dns-test-88a74ab6-5956-493f-9ca7-42c690262615": Phase="Pending", Reason="", readiness=false. Elapsed: 7.509076ms - Jul 29 16:40:31.908: INFO: Pod "dns-test-88a74ab6-5956-493f-9ca7-42c690262615": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020031497s - Jul 29 16:40:33.904: INFO: Pod "dns-test-88a74ab6-5956-493f-9ca7-42c690262615": Phase="Running", Reason="", readiness=true. Elapsed: 4.015662614s - Jul 29 16:40:33.904: INFO: Pod "dns-test-88a74ab6-5956-493f-9ca7-42c690262615" satisfied condition "running" - STEP: retrieving the pod 07/29/23 16:40:33.904 - STEP: looking for the results for each expected name from probers 07/29/23 16:40:33.91 - Jul 29 16:40:33.920: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:33.926: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:33.932: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:33.941: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:33.951: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:33.957: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:33.962: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:33.967: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:33.968: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local 
jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - - Jul 29 16:40:38.982: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:38.988: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:38.995: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:39.001: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:39.007: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:39.013: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:39.020: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:39.025: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:39.025: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - - Jul 29 16:40:43.983: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:43.990: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 
29 16:40:43.998: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:44.006: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:44.012: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:44.018: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:44.025: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:44.034: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:44.034: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - - Jul 29 16:40:48.982: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:48.995: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:49.003: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:49.011: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:49.022: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod 
dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:49.032: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:49.042: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:49.056: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:49.058: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - - Jul 29 16:40:53.980: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:53.986: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:53.995: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:54.003: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:54.009: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:54.020: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:54.027: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods 
dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:54.034: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:54.034: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - - Jul 29 16:40:58.980: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:58.988: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:58.994: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:59.000: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:59.007: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:59.013: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:59.020: INFO: Unable to read jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:59.025: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local from pod dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615: the server could not find the requested resource (get pods dns-test-88a74ab6-5956-493f-9ca7-42c690262615) - Jul 29 16:40:59.025: INFO: Lookups using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local wheezy_udp@dns-test-service-2.dns-2566.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-2566.svc.cluster.local 
jessie_udp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-2566.svc.cluster.local jessie_udp@dns-test-service-2.dns-2566.svc.cluster.local jessie_tcp@dns-test-service-2.dns-2566.svc.cluster.local] - - Jul 29 16:41:04.051: INFO: DNS probes using dns-2566/dns-test-88a74ab6-5956-493f-9ca7-42c690262615 succeeded - - STEP: deleting the pod 07/29/23 16:41:04.052 - STEP: deleting the test headless service 07/29/23 16:41:04.084 + [It] should provide DNS for ExternalName services [Conformance] + test/e2e/network/dns.go:333 + STEP: Creating a test externalName service 08/24/23 12:48:21.02 + STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:21.031 + STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:21.032 + STEP: creating a pod to probe DNS 08/24/23 12:48:21.032 + STEP: submitting the pod to kubernetes 08/24/23 12:48:21.032 + Aug 24 12:48:21.061: INFO: Waiting up to 15m0s for pod "dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c" in namespace "dns-6893" to be "running" + Aug 24 12:48:21.074: INFO: Pod "dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c": Phase="Pending", Reason="", readiness=false. Elapsed: 13.687062ms + Aug 24 12:48:23.083: INFO: Pod "dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c": Phase="Running", Reason="", readiness=true. Elapsed: 2.022355005s + Aug 24 12:48:23.083: INFO: Pod "dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c" satisfied condition "running" + STEP: retrieving the pod 08/24/23 12:48:23.083 + STEP: looking for the results for each expected name from probers 08/24/23 12:48:23.089 + Aug 24 12:48:23.106: INFO: DNS probes using dns-test-d71ac2b1-5f60-4abf-9e5e-96cdc1fb2a2c succeeded + + STEP: deleting the pod 08/24/23 12:48:23.106 + STEP: changing the externalName to bar.example.com 08/24/23 12:48:23.128 + STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:23.15 + STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:23.15 + STEP: creating a second pod to probe DNS 08/24/23 12:48:23.15 + STEP: submitting the pod to kubernetes 08/24/23 12:48:23.151 + Aug 24 12:48:23.166: INFO: Waiting up to 15m0s for pod "dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3" in namespace "dns-6893" to be "running" + Aug 24 12:48:23.180: INFO: Pod "dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3": Phase="Pending", Reason="", readiness=false. Elapsed: 14.396638ms + Aug 24 12:48:25.193: INFO: Pod "dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.027037822s + Aug 24 12:48:25.193: INFO: Pod "dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3" satisfied condition "running" + STEP: retrieving the pod 08/24/23 12:48:25.193 + STEP: looking for the results for each expected name from probers 08/24/23 12:48:25.202 + Aug 24 12:48:25.220: INFO: File jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local from pod dns-6893/dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3 contains 'foo.example.com. + ' instead of 'bar.example.com.' + Aug 24 12:48:25.220: INFO: Lookups using dns-6893/dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3 failed for: [jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local] + + Aug 24 12:48:30.237: INFO: DNS probes using dns-test-7a9e124a-37f8-44de-bcaf-6ebe25e0daa3 succeeded + + STEP: deleting the pod 08/24/23 12:48:30.237 + STEP: changing the service to type=ClusterIP 08/24/23 12:48:30.257 + STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local A > /results/wheezy_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:30.331 + STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6893.svc.cluster.local A > /results/jessie_udp@dns-test-service-3.dns-6893.svc.cluster.local; sleep 1; done + 08/24/23 12:48:30.331 + STEP: creating a third pod to probe DNS 08/24/23 12:48:30.332 + STEP: submitting the pod to kubernetes 08/24/23 12:48:30.339 + Aug 24 12:48:30.352: INFO: Waiting up to 15m0s for pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7" in namespace "dns-6893" to be "running" + Aug 24 12:48:30.363: INFO: Pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7": Phase="Pending", Reason="", readiness=false. Elapsed: 10.74265ms + Aug 24 12:48:32.372: INFO: Pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019965081s + Aug 24 12:48:34.374: INFO: Pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7": Phase="Running", Reason="", readiness=true. Elapsed: 4.02185716s + Aug 24 12:48:34.374: INFO: Pod "dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7" satisfied condition "running" + STEP: retrieving the pod 08/24/23 12:48:34.374 + STEP: looking for the results for each expected name from probers 08/24/23 12:48:34.384 + Aug 24 12:48:34.405: INFO: DNS probes using dns-test-9419b4df-14e7-4830-8482-e1c10ce779c7 succeeded + + STEP: deleting the pod 08/24/23 12:48:34.405 + STEP: deleting the test externalName service 08/24/23 12:48:34.431 [AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 - Jul 29 16:41:04.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:48:34.467: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 - STEP: Destroying namespace "dns-2566" for this suite. 
07/29/23 16:41:04.176 - << End Captured GinkgoWriter Output ------------------------------- -S ------------------------------- -[sig-apps] Daemon set [Serial] - should run and stop simple daemon [Conformance] - test/e2e/apps/daemon_set.go:177 -[BeforeEach] [sig-apps] Daemon set [Serial] - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:41:04.203 -Jul 29 16:41:04.203: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename daemonsets 07/29/23 16:41:04.21 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:41:04.242 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:41:04.248 -[BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 -[It] should run and stop simple daemon [Conformance] - test/e2e/apps/daemon_set.go:177 -STEP: Creating simple DaemonSet "daemon-set" 07/29/23 16:41:04.287 -STEP: Check that daemon pods launch on every node of the cluster. 07/29/23 16:41:04.296 -Jul 29 16:41:04.316: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:41:04.316: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:41:05.335: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:41:05.335: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:41:06.343: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 16:41:06.344: INFO: Node wetuj3nuajog-2 is running 0 daemon pod, expected 1 -Jul 29 16:41:07.333: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 -Jul 29 16:41:07.334: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set -STEP: Stop a daemon pod, check that the daemon pod is revived. 
07/29/23 16:41:07.341 -Jul 29 16:41:07.387: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 16:41:07.387: INFO: Node wetuj3nuajog-2 is running 0 daemon pod, expected 1 -Jul 29 16:41:08.411: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 16:41:08.412: INFO: Node wetuj3nuajog-2 is running 0 daemon pod, expected 1 -Jul 29 16:41:09.425: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 16:41:09.426: INFO: Node wetuj3nuajog-2 is running 0 daemon pod, expected 1 -Jul 29 16:41:10.405: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 -Jul 29 16:41:10.405: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set -[AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 -STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:41:10.417 -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-8163, will wait for the garbage collector to delete the pods 07/29/23 16:41:10.417 -Jul 29 16:41:10.492: INFO: Deleting DaemonSet.extensions daemon-set took: 15.339959ms -Jul 29 16:41:10.593: INFO: Terminating DaemonSet.extensions daemon-set pods took: 101.073584ms -Jul 29 16:41:13.503: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:41:13.503: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set -Jul 29 16:41:13.509: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"28732"},"items":null} - -Jul 29 16:41:13.514: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"28732"},"items":null} - -[AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/framework/node/init/init.go:32 -Jul 29 16:41:13.546: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] - tear down framework | framework.go:193 -STEP: Destroying namespace "daemonsets-8163" for this suite. 07/29/23 16:41:13.553 ------------------------------- -• [SLOW TEST] [9.361 seconds] -[sig-apps] Daemon set [Serial] -test/e2e/apps/framework.go:23 - should run and stop simple daemon [Conformance] - test/e2e/apps/daemon_set.go:177 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Daemon set [Serial] - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:41:04.203 - Jul 29 16:41:04.203: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename daemonsets 07/29/23 16:41:04.21 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:41:04.242 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:41:04.248 - [BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 - [It] should run and stop simple daemon [Conformance] - test/e2e/apps/daemon_set.go:177 - STEP: Creating simple DaemonSet "daemon-set" 07/29/23 16:41:04.287 - STEP: Check that daemon pods launch on every node of the cluster. 
07/29/23 16:41:04.296 - Jul 29 16:41:04.316: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:41:04.316: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:41:05.335: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:41:05.335: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:41:06.343: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 16:41:06.344: INFO: Node wetuj3nuajog-2 is running 0 daemon pod, expected 1 - Jul 29 16:41:07.333: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 - Jul 29 16:41:07.334: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set - STEP: Stop a daemon pod, check that the daemon pod is revived. 07/29/23 16:41:07.341 - Jul 29 16:41:07.387: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 16:41:07.387: INFO: Node wetuj3nuajog-2 is running 0 daemon pod, expected 1 - Jul 29 16:41:08.411: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 16:41:08.412: INFO: Node wetuj3nuajog-2 is running 0 daemon pod, expected 1 - Jul 29 16:41:09.425: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 - Jul 29 16:41:09.426: INFO: Node wetuj3nuajog-2 is running 0 daemon pod, expected 1 - Jul 29 16:41:10.405: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 - Jul 29 16:41:10.405: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set - [AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 - STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:41:10.417 - STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-8163, will wait for the garbage collector to delete the pods 07/29/23 16:41:10.417 - Jul 29 16:41:10.492: INFO: Deleting DaemonSet.extensions daemon-set took: 15.339959ms - Jul 29 16:41:10.593: INFO: Terminating DaemonSet.extensions daemon-set pods took: 101.073584ms - Jul 29 16:41:13.503: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:41:13.503: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set - Jul 29 16:41:13.509: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"28732"},"items":null} - - Jul 29 16:41:13.514: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"28732"},"items":null} - - [AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/framework/node/init/init.go:32 - Jul 29 16:41:13.546: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] - tear down framework | framework.go:193 - STEP: Destroying namespace "daemonsets-8163" for this suite. 07/29/23 16:41:13.553 + STEP: Destroying namespace "dns-6893" for this suite. 
08/24/23 12:48:34.477 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch - watch on custom resource definition objects [Conformance] - test/e2e/apimachinery/crd_watch.go:51 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +[sig-storage] EmptyDir volumes + should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:137 +[BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:41:13.565 -Jul 29 16:41:13.565: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-watch 07/29/23 16:41:13.568 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:41:13.612 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:41:13.62 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:48:34.503 +Aug 24 12:48:34.504: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 12:48:34.506 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:34.537 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:34.544 +[BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[It] watch on custom resource definition objects [Conformance] - test/e2e/apimachinery/crd_watch.go:51 -Jul 29 16:41:13.626: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Creating first CR 07/29/23 16:41:16.382 -Jul 29 16:41:16.396: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:16Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:16Z]] name:name1 resourceVersion:28749 uid:062c4728-e610-43f9-a270-fbeaba3d66d4] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Creating second CR 07/29/23 16:41:26.396 -Jul 29 16:41:26.412: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:26Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:26Z]] name:name2 resourceVersion:28801 uid:85f7bace-2682-4e74-b503-a60c325f0502] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Modifying first CR 07/29/23 16:41:36.413 -Jul 29 16:41:36.427: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:16Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:36Z]] name:name1 
resourceVersion:28824 uid:062c4728-e610-43f9-a270-fbeaba3d66d4] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Modifying second CR 07/29/23 16:41:46.431 -Jul 29 16:41:46.447: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:26Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:46Z]] name:name2 resourceVersion:28847 uid:85f7bace-2682-4e74-b503-a60c325f0502] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Deleting first CR 07/29/23 16:41:56.451 -Jul 29 16:41:56.468: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:16Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:36Z]] name:name1 resourceVersion:28870 uid:062c4728-e610-43f9-a270-fbeaba3d66d4] num:map[num1:9223372036854775807 num2:1000000]]} -STEP: Deleting second CR 07/29/23 16:42:06.47 -Jul 29 16:42:06.507: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:26Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:46Z]] name:name2 resourceVersion:28893 uid:85f7bace-2682-4e74-b503-a60c325f0502] num:map[num1:9223372036854775807 num2:1000000]]} -[AfterEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +[It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:137 +STEP: Creating a pod to test emptydir 0666 on tmpfs 08/24/23 12:48:34.549 +Aug 24 12:48:34.562: INFO: Waiting up to 5m0s for pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e" in namespace "emptydir-9232" to be "Succeeded or Failed" +Aug 24 12:48:34.579: INFO: Pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e": Phase="Pending", Reason="", readiness=false. Elapsed: 16.431982ms +Aug 24 12:48:36.585: INFO: Pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023084401s +Aug 24 12:48:38.587: INFO: Pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.025082521s +STEP: Saw pod success 08/24/23 12:48:38.588 +Aug 24 12:48:38.588: INFO: Pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e" satisfied condition "Succeeded or Failed" +Aug 24 12:48:38.595: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-ac7be750-d190-4700-bcac-ce2317ae0c6e container test-container: +STEP: delete the pod 08/24/23 12:48:38.607 +Aug 24 12:48:38.626: INFO: Waiting for pod pod-ac7be750-d190-4700-bcac-ce2317ae0c6e to disappear +Aug 24 12:48:38.633: INFO: Pod pod-ac7be750-d190-4700-bcac-ce2317ae0c6e no longer exists +[AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 16:42:17.041: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +Aug 24 12:48:38.633: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "crd-watch-1877" for this suite. 07/29/23 16:42:17.053 +STEP: Destroying namespace "emptydir-9232" for this suite. 08/24/23 12:48:38.643 ------------------------------ -• [SLOW TEST] [63.503 seconds] -[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - CustomResourceDefinition Watch - test/e2e/apimachinery/crd_watch.go:44 - watch on custom resource definition objects [Conformance] - test/e2e/apimachinery/crd_watch.go:51 +• [4.153 seconds] +[sig-storage] EmptyDir volumes +test/e2e/common/storage/framework.go:23 + should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:137 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] + [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:41:13.565 - Jul 29 16:41:13.565: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-watch 07/29/23 16:41:13.568 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:41:13.612 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:41:13.62 - [BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:48:34.503 + Aug 24 12:48:34.504: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 12:48:34.506 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:34.537 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:34.544 + [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [It] watch on custom resource definition objects [Conformance] - test/e2e/apimachinery/crd_watch.go:51 - Jul 29 16:41:13.626: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Creating first CR 07/29/23 16:41:16.382 - Jul 29 
16:41:16.396: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:16Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:16Z]] name:name1 resourceVersion:28749 uid:062c4728-e610-43f9-a270-fbeaba3d66d4] num:map[num1:9223372036854775807 num2:1000000]]} - STEP: Creating second CR 07/29/23 16:41:26.396 - Jul 29 16:41:26.412: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:26Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:26Z]] name:name2 resourceVersion:28801 uid:85f7bace-2682-4e74-b503-a60c325f0502] num:map[num1:9223372036854775807 num2:1000000]]} - STEP: Modifying first CR 07/29/23 16:41:36.413 - Jul 29 16:41:36.427: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:16Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:36Z]] name:name1 resourceVersion:28824 uid:062c4728-e610-43f9-a270-fbeaba3d66d4] num:map[num1:9223372036854775807 num2:1000000]]} - STEP: Modifying second CR 07/29/23 16:41:46.431 - Jul 29 16:41:46.447: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:26Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:46Z]] name:name2 resourceVersion:28847 uid:85f7bace-2682-4e74-b503-a60c325f0502] num:map[num1:9223372036854775807 num2:1000000]]} - STEP: Deleting first CR 07/29/23 16:41:56.451 - Jul 29 16:41:56.468: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:16Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2023-07-29T16:41:36Z]] name:name1 resourceVersion:28870 uid:062c4728-e610-43f9-a270-fbeaba3d66d4] num:map[num1:9223372036854775807 num2:1000000]]} - STEP: Deleting second CR 07/29/23 16:42:06.47 - Jul 29 16:42:06.507: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2023-07-29T16:41:26Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update 
time:2023-07-29T16:41:46Z]] name:name2 resourceVersion:28893 uid:85f7bace-2682-4e74-b503-a60c325f0502] num:map[num1:9223372036854775807 num2:1000000]]} - [AfterEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] + [It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:137 + STEP: Creating a pod to test emptydir 0666 on tmpfs 08/24/23 12:48:34.549 + Aug 24 12:48:34.562: INFO: Waiting up to 5m0s for pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e" in namespace "emptydir-9232" to be "Succeeded or Failed" + Aug 24 12:48:34.579: INFO: Pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e": Phase="Pending", Reason="", readiness=false. Elapsed: 16.431982ms + Aug 24 12:48:36.585: INFO: Pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023084401s + Aug 24 12:48:38.587: INFO: Pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.025082521s + STEP: Saw pod success 08/24/23 12:48:38.588 + Aug 24 12:48:38.588: INFO: Pod "pod-ac7be750-d190-4700-bcac-ce2317ae0c6e" satisfied condition "Succeeded or Failed" + Aug 24 12:48:38.595: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-ac7be750-d190-4700-bcac-ce2317ae0c6e container test-container: + STEP: delete the pod 08/24/23 12:48:38.607 + Aug 24 12:48:38.626: INFO: Waiting for pod pod-ac7be750-d190-4700-bcac-ce2317ae0c6e to disappear + Aug 24 12:48:38.633: INFO: Pod pod-ac7be750-d190-4700-bcac-ce2317ae0c6e no longer exists + [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 16:42:17.041: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] + Aug 24 12:48:38.633: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "crd-watch-1877" for this suite. 07/29/23 16:42:17.053 + STEP: Destroying namespace "emptydir-9232" for this suite. 
08/24/23 12:48:38.643 << End Captured GinkgoWriter Output ------------------------------ -SSSSS ------------------------------- -[sig-node] Kubelet when scheduling an agnhost Pod with hostAliases - should write entries to /etc/hosts [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:148 -[BeforeEach] [sig-node] Kubelet +[sig-storage] Secrets + should be immutable if `immutable` field is set [Conformance] + test/e2e/common/storage/secrets_volume.go:386 +[BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:42:17.068 -Jul 29 16:42:17.069: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubelet-test 07/29/23 16:42:17.072 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:42:17.106 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:42:17.111 -[BeforeEach] [sig-node] Kubelet +STEP: Creating a kubernetes client 08/24/23 12:48:38.656 +Aug 24 12:48:38.656: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 12:48:38.658 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:38.696 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:38.705 +[BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 -[It] should write entries to /etc/hosts [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:148 -STEP: Waiting for pod completion 07/29/23 16:42:17.134 -Jul 29 16:42:17.135: INFO: Waiting up to 3m0s for pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1" in namespace "kubelet-test-4734" to be "completed" -Jul 29 16:42:17.153: INFO: Pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1": Phase="Pending", Reason="", readiness=false. Elapsed: 17.88963ms -Jul 29 16:42:19.161: INFO: Pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.026425614s -Jul 29 16:42:21.168: INFO: Pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.033148333s -Jul 29 16:42:21.169: INFO: Pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1" satisfied condition "completed" -[AfterEach] [sig-node] Kubelet +[It] should be immutable if `immutable` field is set [Conformance] + test/e2e/common/storage/secrets_volume.go:386 +[AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 16:42:21.200: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Kubelet +Aug 24 12:48:38.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Kubelet +[DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "kubelet-test-4734" for this suite. 07/29/23 16:42:21.211 +STEP: Destroying namespace "secrets-9721" for this suite. 
08/24/23 12:48:38.801 ------------------------------ -• [4.158 seconds] -[sig-node] Kubelet -test/e2e/common/node/framework.go:23 - when scheduling an agnhost Pod with hostAliases - test/e2e/common/node/kubelet.go:140 - should write entries to /etc/hosts [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:148 +• [0.156 seconds] +[sig-storage] Secrets +test/e2e/common/storage/framework.go:23 + should be immutable if `immutable` field is set [Conformance] + test/e2e/common/storage/secrets_volume.go:386 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Kubelet + [BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:42:17.068 - Jul 29 16:42:17.069: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubelet-test 07/29/23 16:42:17.072 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:42:17.106 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:42:17.111 - [BeforeEach] [sig-node] Kubelet + STEP: Creating a kubernetes client 08/24/23 12:48:38.656 + Aug 24 12:48:38.656: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 12:48:38.658 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:38.696 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:38.705 + [BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Kubelet - test/e2e/common/node/kubelet.go:41 - [It] should write entries to /etc/hosts [NodeConformance] [Conformance] - test/e2e/common/node/kubelet.go:148 - STEP: Waiting for pod completion 07/29/23 16:42:17.134 - Jul 29 16:42:17.135: INFO: Waiting up to 3m0s for pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1" in namespace "kubelet-test-4734" to be "completed" - Jul 29 16:42:17.153: INFO: Pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1": Phase="Pending", Reason="", readiness=false. Elapsed: 17.88963ms - Jul 29 16:42:19.161: INFO: Pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.026425614s - Jul 29 16:42:21.168: INFO: Pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.033148333s - Jul 29 16:42:21.169: INFO: Pod "agnhost-host-aliases89ed17b1-f502-4447-9490-3a4f0046d8c1" satisfied condition "completed" - [AfterEach] [sig-node] Kubelet + [It] should be immutable if `immutable` field is set [Conformance] + test/e2e/common/storage/secrets_volume.go:386 + [AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 16:42:21.200: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Kubelet + Aug 24 12:48:38.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Kubelet + [DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "kubelet-test-4734" for this suite. 07/29/23 16:42:21.211 + STEP: Destroying namespace "secrets-9721" for this suite. 
08/24/23 12:48:38.801 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSS ------------------------------ -[sig-node] Variable Expansion - should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] - test/e2e/common/node/expansion.go:225 -[BeforeEach] [sig-node] Variable Expansion +[sig-node] RuntimeClass + should support RuntimeClasses API operations [Conformance] + test/e2e/common/node/runtimeclass.go:189 +[BeforeEach] [sig-node] RuntimeClass set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:42:21.228 -Jul 29 16:42:21.228: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename var-expansion 07/29/23 16:42:21.232 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:42:21.258 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:42:21.263 -[BeforeEach] [sig-node] Variable Expansion +STEP: Creating a kubernetes client 08/24/23 12:48:38.814 +Aug 24 12:48:38.814: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename runtimeclass 08/24/23 12:48:38.816 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:38.85 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:38.855 +[BeforeEach] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:31 -[It] should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] - test/e2e/common/node/expansion.go:225 -STEP: creating the pod with failed condition 07/29/23 16:42:21.268 -Jul 29 16:42:21.282: INFO: Waiting up to 2m0s for pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" in namespace "var-expansion-8355" to be "running" -Jul 29 16:42:21.289: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 6.659344ms -Jul 29 16:42:23.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014514889s -Jul 29 16:42:25.301: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018783802s -Jul 29 16:42:27.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 6.015387262s -Jul 29 16:42:29.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 8.014601744s -Jul 29 16:42:31.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 10.016549231s -Jul 29 16:42:33.296: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 12.013700412s -Jul 29 16:42:35.296: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 14.013915419s -Jul 29 16:42:37.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 16.016575895s -Jul 29 16:42:39.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. 
Elapsed: 18.015782939s -Jul 29 16:42:41.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 20.017959942s -Jul 29 16:42:43.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 22.016237889s -Jul 29 16:42:45.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 24.016546821s -Jul 29 16:42:47.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 26.015188883s -Jul 29 16:42:49.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 28.015583055s -Jul 29 16:42:51.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 30.017393591s -Jul 29 16:42:53.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 32.01688518s -Jul 29 16:42:55.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 34.014773573s -Jul 29 16:42:57.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 36.017430695s -Jul 29 16:42:59.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 38.015565328s -Jul 29 16:43:01.302: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 40.019311058s -Jul 29 16:43:03.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 42.016151855s -Jul 29 16:43:05.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 44.01586597s -Jul 29 16:43:07.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 46.015881132s -Jul 29 16:43:09.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 48.014330567s -Jul 29 16:43:11.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 50.015518706s -Jul 29 16:43:13.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 52.014436823s -Jul 29 16:43:15.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 54.016585554s -Jul 29 16:43:17.304: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 56.021835316s -Jul 29 16:43:19.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 58.015385586s -Jul 29 16:43:21.304: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.02184265s -Jul 29 16:43:23.302: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.020066627s -Jul 29 16:43:25.296: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m4.014232667s -Jul 29 16:43:27.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.017300994s -Jul 29 16:43:29.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.015054481s -Jul 29 16:43:31.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.015514075s -Jul 29 16:43:33.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.017682448s -Jul 29 16:43:35.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.017532668s -Jul 29 16:43:37.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.014925163s -Jul 29 16:43:39.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.01537105s -Jul 29 16:43:41.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.015742092s -Jul 29 16:43:43.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.017518172s -Jul 29 16:43:45.303: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.020928505s -Jul 29 16:43:47.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.015717829s -Jul 29 16:43:49.296: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.013664934s -Jul 29 16:43:51.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.016590659s -Jul 29 16:43:53.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.015301568s -Jul 29 16:43:55.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.018096998s -Jul 29 16:43:57.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.0164676s -Jul 29 16:43:59.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.015447592s -Jul 29 16:44:01.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.018261798s -Jul 29 16:44:03.301: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.019247418s -Jul 29 16:44:05.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.0153322s -Jul 29 16:44:07.303: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.02068846s -Jul 29 16:44:09.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m48.014888182s -Jul 29 16:44:11.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.018018717s -Jul 29 16:44:13.301: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m52.018703225s -Jul 29 16:44:15.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.015105826s -Jul 29 16:44:17.295: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.012964888s -Jul 29 16:44:19.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.014586479s -Jul 29 16:44:21.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.014315205s -Jul 29 16:44:21.303: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.021016632s -STEP: updating the pod 07/29/23 16:44:21.304 -Jul 29 16:44:21.826: INFO: Successfully updated pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" -STEP: waiting for pod running 07/29/23 16:44:21.826 -Jul 29 16:44:21.826: INFO: Waiting up to 2m0s for pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" in namespace "var-expansion-8355" to be "running" -Jul 29 16:44:21.833: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 6.410437ms -Jul 29 16:44:23.844: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Running", Reason="", readiness=true. Elapsed: 2.017798268s -Jul 29 16:44:23.844: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" satisfied condition "running" -STEP: deleting the pod gracefully 07/29/23 16:44:23.844 -Jul 29 16:44:23.845: INFO: Deleting pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" in namespace "var-expansion-8355" -Jul 29 16:44:23.865: INFO: Wait up to 5m0s for pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" to be fully deleted -[AfterEach] [sig-node] Variable Expansion +[It] should support RuntimeClasses API operations [Conformance] + test/e2e/common/node/runtimeclass.go:189 +STEP: getting /apis 08/24/23 12:48:38.861 +STEP: getting /apis/node.k8s.io 08/24/23 12:48:38.865 +STEP: getting /apis/node.k8s.io/v1 08/24/23 12:48:38.867 +STEP: creating 08/24/23 12:48:38.869 +STEP: watching 08/24/23 12:48:38.9 +Aug 24 12:48:38.901: INFO: starting watch +STEP: getting 08/24/23 12:48:38.911 +STEP: listing 08/24/23 12:48:38.916 +STEP: patching 08/24/23 12:48:38.923 +STEP: updating 08/24/23 12:48:38.932 +Aug 24 12:48:38.942: INFO: waiting for watch events with expected annotations +STEP: deleting 08/24/23 12:48:38.942 +STEP: deleting a collection 08/24/23 12:48:38.974 +[AfterEach] [sig-node] RuntimeClass test/e2e/framework/node/init/init.go:32 -Jul 29 16:44:55.918: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Variable Expansion +Aug 24 12:48:39.002: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-node] RuntimeClass dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-node] 
RuntimeClass tear down framework | framework.go:193 -STEP: Destroying namespace "var-expansion-8355" for this suite. 07/29/23 16:44:55.928 +STEP: Destroying namespace "runtimeclass-6688" for this suite. 08/24/23 12:48:39.011 ------------------------------ -• [SLOW TEST] [154.717 seconds] -[sig-node] Variable Expansion +• [0.215 seconds] +[sig-node] RuntimeClass test/e2e/common/node/framework.go:23 - should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] - test/e2e/common/node/expansion.go:225 + should support RuntimeClasses API operations [Conformance] + test/e2e/common/node/runtimeclass.go:189 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Variable Expansion + [BeforeEach] [sig-node] RuntimeClass set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:42:21.228 - Jul 29 16:42:21.228: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename var-expansion 07/29/23 16:42:21.232 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:42:21.258 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:42:21.263 - [BeforeEach] [sig-node] Variable Expansion + STEP: Creating a kubernetes client 08/24/23 12:48:38.814 + Aug 24 12:48:38.814: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename runtimeclass 08/24/23 12:48:38.816 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:38.85 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:38.855 + [BeforeEach] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:31 - [It] should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] - test/e2e/common/node/expansion.go:225 - STEP: creating the pod with failed condition 07/29/23 16:42:21.268 - Jul 29 16:42:21.282: INFO: Waiting up to 2m0s for pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" in namespace "var-expansion-8355" to be "running" - Jul 29 16:42:21.289: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 6.659344ms - Jul 29 16:42:23.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014514889s - Jul 29 16:42:25.301: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018783802s - Jul 29 16:42:27.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 6.015387262s - Jul 29 16:42:29.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 8.014601744s - Jul 29 16:42:31.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 10.016549231s - Jul 29 16:42:33.296: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 12.013700412s - Jul 29 16:42:35.296: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 14.013915419s - Jul 29 16:42:37.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. 
Elapsed: 16.016575895s - Jul 29 16:42:39.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 18.015782939s - Jul 29 16:42:41.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 20.017959942s - Jul 29 16:42:43.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 22.016237889s - Jul 29 16:42:45.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 24.016546821s - Jul 29 16:42:47.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 26.015188883s - Jul 29 16:42:49.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 28.015583055s - Jul 29 16:42:51.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 30.017393591s - Jul 29 16:42:53.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 32.01688518s - Jul 29 16:42:55.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 34.014773573s - Jul 29 16:42:57.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 36.017430695s - Jul 29 16:42:59.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 38.015565328s - Jul 29 16:43:01.302: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 40.019311058s - Jul 29 16:43:03.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 42.016151855s - Jul 29 16:43:05.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 44.01586597s - Jul 29 16:43:07.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 46.015881132s - Jul 29 16:43:09.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 48.014330567s - Jul 29 16:43:11.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 50.015518706s - Jul 29 16:43:13.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 52.014436823s - Jul 29 16:43:15.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 54.016585554s - Jul 29 16:43:17.304: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 56.021835316s - Jul 29 16:43:19.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 58.015385586s - Jul 29 16:43:21.304: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m0.02184265s - Jul 29 16:43:23.302: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.020066627s - Jul 29 16:43:25.296: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.014232667s - Jul 29 16:43:27.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.017300994s - Jul 29 16:43:29.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.015054481s - Jul 29 16:43:31.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.015514075s - Jul 29 16:43:33.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.017682448s - Jul 29 16:43:35.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.017532668s - Jul 29 16:43:37.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.014925163s - Jul 29 16:43:39.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.01537105s - Jul 29 16:43:41.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.015742092s - Jul 29 16:43:43.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.017518172s - Jul 29 16:43:45.303: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.020928505s - Jul 29 16:43:47.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.015717829s - Jul 29 16:43:49.296: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.013664934s - Jul 29 16:43:51.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.016590659s - Jul 29 16:43:53.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.015301568s - Jul 29 16:43:55.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.018096998s - Jul 29 16:43:57.299: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.0164676s - Jul 29 16:43:59.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.015447592s - Jul 29 16:44:01.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.018261798s - Jul 29 16:44:03.301: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.019247418s - Jul 29 16:44:05.298: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m44.0153322s - Jul 29 16:44:07.303: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.02068846s - Jul 29 16:44:09.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.014888182s - Jul 29 16:44:11.300: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.018018717s - Jul 29 16:44:13.301: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m52.018703225s - Jul 29 16:44:15.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.015105826s - Jul 29 16:44:17.295: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.012964888s - Jul 29 16:44:19.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.014586479s - Jul 29 16:44:21.297: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.014315205s - Jul 29 16:44:21.303: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.021016632s - STEP: updating the pod 07/29/23 16:44:21.304 - Jul 29 16:44:21.826: INFO: Successfully updated pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" - STEP: waiting for pod running 07/29/23 16:44:21.826 - Jul 29 16:44:21.826: INFO: Waiting up to 2m0s for pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" in namespace "var-expansion-8355" to be "running" - Jul 29 16:44:21.833: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Pending", Reason="", readiness=false. Elapsed: 6.410437ms - Jul 29 16:44:23.844: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.017798268s - Jul 29 16:44:23.844: INFO: Pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" satisfied condition "running" - STEP: deleting the pod gracefully 07/29/23 16:44:23.844 - Jul 29 16:44:23.845: INFO: Deleting pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" in namespace "var-expansion-8355" - Jul 29 16:44:23.865: INFO: Wait up to 5m0s for pod "var-expansion-092521e9-0282-4e65-a909-75c386489e12" to be fully deleted - [AfterEach] [sig-node] Variable Expansion + [It] should support RuntimeClasses API operations [Conformance] + test/e2e/common/node/runtimeclass.go:189 + STEP: getting /apis 08/24/23 12:48:38.861 + STEP: getting /apis/node.k8s.io 08/24/23 12:48:38.865 + STEP: getting /apis/node.k8s.io/v1 08/24/23 12:48:38.867 + STEP: creating 08/24/23 12:48:38.869 + STEP: watching 08/24/23 12:48:38.9 + Aug 24 12:48:38.901: INFO: starting watch + STEP: getting 08/24/23 12:48:38.911 + STEP: listing 08/24/23 12:48:38.916 + STEP: patching 08/24/23 12:48:38.923 + STEP: updating 08/24/23 12:48:38.932 + Aug 24 12:48:38.942: INFO: waiting for watch events with expected annotations + STEP: deleting 08/24/23 12:48:38.942 + STEP: deleting a collection 08/24/23 12:48:38.974 + [AfterEach] [sig-node] RuntimeClass test/e2e/framework/node/init/init.go:32 - Jul 29 16:44:55.918: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Variable Expansion + Aug 24 12:48:39.002: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-node] RuntimeClass dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-node] RuntimeClass tear down framework | framework.go:193 - STEP: Destroying namespace "var-expansion-8355" for this suite. 07/29/23 16:44:55.928 + STEP: Destroying namespace "runtimeclass-6688" for this suite. 
08/24/23 12:48:39.011 << End Captured GinkgoWriter Output ------------------------------ -SSS ------------------------------- -[sig-node] Container Runtime blackbox test on terminated container - should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:216 +[sig-node] Container Runtime blackbox test when starting a container that exits + should run with the expected status [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:52 [BeforeEach] [sig-node] Container Runtime set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:44:55.95 -Jul 29 16:44:55.950: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-runtime 07/29/23 16:44:55.953 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:44:55.993 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:44:56 +STEP: Creating a kubernetes client 08/24/23 12:48:39.029 +Aug 24 12:48:39.030: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-runtime 08/24/23 12:48:39.034 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:39.076 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:39.084 [BeforeEach] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:31 -[It] should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:216 -STEP: create the container 07/29/23 16:44:56.008 -STEP: wait for the container to reach Failed 07/29/23 16:44:56.033 -STEP: get the container status 07/29/23 16:45:00.09 -STEP: the container should be terminated 07/29/23 16:45:00.096 -STEP: the termination message should be set 07/29/23 16:45:00.097 -Jul 29 16:45:00.097: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- -STEP: delete the container 07/29/23 16:45:00.097 +[It] should run with the expected status [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:52 +STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' 08/24/23 12:48:39.107 +STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' 08/24/23 12:48:57.324 +STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition 08/24/23 12:48:57.33 +STEP: Container 'terminate-cmd-rpa': should get the expected 'State' 08/24/23 12:48:57.341 +STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] 08/24/23 12:48:57.341 +STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' 08/24/23 12:48:57.404 +STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' 08/24/23 12:49:00.458 +STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition 08/24/23 12:49:02.477 +STEP: Container 'terminate-cmd-rpof': should get the expected 'State' 08/24/23 12:49:02.487 +STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] 08/24/23 12:49:02.488 +STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' 08/24/23 12:49:02.517 +STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' 08/24/23 12:49:03.543 +STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition 08/24/23 12:49:06.581 
+STEP: Container 'terminate-cmd-rpn': should get the expected 'State' 08/24/23 12:49:06.592 +STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] 08/24/23 12:49:06.592 [AfterEach] [sig-node] Container Runtime test/e2e/framework/node/init/init.go:32 -Jul 29 16:45:00.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:49:06.635: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] Container Runtime dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-node] Container Runtime tear down framework | framework.go:193 -STEP: Destroying namespace "container-runtime-1120" for this suite. 07/29/23 16:45:00.132 +STEP: Destroying namespace "container-runtime-8223" for this suite. 08/24/23 12:49:06.642 ------------------------------ -• [4.192 seconds] +• [SLOW TEST] [27.624 seconds] [sig-node] Container Runtime test/e2e/common/node/framework.go:23 blackbox test test/e2e/common/node/runtime.go:44 - on terminated container - test/e2e/common/node/runtime.go:137 - should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:216 + when starting a container that exits + test/e2e/common/node/runtime.go:45 + should run with the expected status [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:52 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-node] Container Runtime set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:44:55.95 - Jul 29 16:44:55.950: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-runtime 07/29/23 16:44:55.953 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:44:55.993 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:44:56 + STEP: Creating a kubernetes client 08/24/23 12:48:39.029 + Aug 24 12:48:39.030: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-runtime 08/24/23 12:48:39.034 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:48:39.076 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:48:39.084 [BeforeEach] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:31 - [It] should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:216 - STEP: create the container 07/29/23 16:44:56.008 - STEP: wait for the container to reach Failed 07/29/23 16:44:56.033 - STEP: get the container status 07/29/23 16:45:00.09 - STEP: the container should be terminated 07/29/23 16:45:00.096 - STEP: the termination message should be set 07/29/23 16:45:00.097 - Jul 29 16:45:00.097: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- - STEP: delete the container 07/29/23 16:45:00.097 + [It] should run with the expected status [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:52 + STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' 08/24/23 12:48:39.107 + STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' 08/24/23 12:48:57.324 + STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' 
condition 08/24/23 12:48:57.33 + STEP: Container 'terminate-cmd-rpa': should get the expected 'State' 08/24/23 12:48:57.341 + STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] 08/24/23 12:48:57.341 + STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' 08/24/23 12:48:57.404 + STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' 08/24/23 12:49:00.458 + STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition 08/24/23 12:49:02.477 + STEP: Container 'terminate-cmd-rpof': should get the expected 'State' 08/24/23 12:49:02.487 + STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] 08/24/23 12:49:02.488 + STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' 08/24/23 12:49:02.517 + STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' 08/24/23 12:49:03.543 + STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition 08/24/23 12:49:06.581 + STEP: Container 'terminate-cmd-rpn': should get the expected 'State' 08/24/23 12:49:06.592 + STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] 08/24/23 12:49:06.592 [AfterEach] [sig-node] Container Runtime test/e2e/framework/node/init/init.go:32 - Jul 29 16:45:00.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:49:06.635: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-node] Container Runtime dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-node] Container Runtime tear down framework | framework.go:193 - STEP: Destroying namespace "container-runtime-1120" for this suite. 07/29/23 16:45:00.132 + STEP: Destroying namespace "container-runtime-8223" for this suite. 
08/24/23 12:49:06.642 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should be able to change the type from ExternalName to ClusterIP [Conformance] - test/e2e/network/service.go:1438 -[BeforeEach] [sig-network] Services +[sig-cli] Kubectl client Proxy server + should support proxy with --port 0 [Conformance] + test/e2e/kubectl/kubectl.go:1787 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:45:00.143 -Jul 29 16:45:00.144: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 16:45:00.146 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:00.177 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:00.194 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 12:49:06.666 +Aug 24 12:49:06.666: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:49:06.669 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:06.702 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:06.712 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should be able to change the type from ExternalName to ClusterIP [Conformance] - test/e2e/network/service.go:1438 -STEP: creating a service externalname-service with the type=ExternalName in namespace services-2741 07/29/23 16:45:00.208 -STEP: changing the ExternalName service to type=ClusterIP 07/29/23 16:45:00.224 -STEP: creating replication controller externalname-service in namespace services-2741 07/29/23 16:45:00.255 -I0729 16:45:00.271137 13 runners.go:193] Created replication controller with name: externalname-service, namespace: services-2741, replica count: 2 -I0729 16:45:03.322779 13 runners.go:193] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Jul 29 16:45:03.323: INFO: Creating new exec pod -Jul 29 16:45:03.335: INFO: Waiting up to 5m0s for pod "execpodhv2mz" in namespace "services-2741" to be "running" -Jul 29 16:45:03.357: INFO: Pod "execpodhv2mz": Phase="Pending", Reason="", readiness=false. Elapsed: 22.053062ms -Jul 29 16:45:05.369: INFO: Pod "execpodhv2mz": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.034186241s -Jul 29 16:45:05.369: INFO: Pod "execpodhv2mz" satisfied condition "running" -Jul 29 16:45:06.371: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-2741 exec execpodhv2mz -- /bin/sh -x -c nc -v -z -w 2 externalname-service 80' -Jul 29 16:45:06.694: INFO: stderr: "+ nc -v -z -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" -Jul 29 16:45:06.695: INFO: stdout: "" -Jul 29 16:45:06.695: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-2741 exec execpodhv2mz -- /bin/sh -x -c nc -v -z -w 2 10.233.44.45 80' -Jul 29 16:45:06.957: INFO: stderr: "+ nc -v -z -w 2 10.233.44.45 80\nConnection to 10.233.44.45 80 port [tcp/http] succeeded!\n" -Jul 29 16:45:06.957: INFO: stdout: "" -Jul 29 16:45:06.957: INFO: Cleaning up the ExternalName to ClusterIP test service -[AfterEach] [sig-network] Services +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[It] should support proxy with --port 0 [Conformance] + test/e2e/kubectl/kubectl.go:1787 +STEP: starting the proxy server 08/24/23 12:49:06.722 +Aug 24 12:49:06.724: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-4046 proxy -p 0 --disable-filter' +STEP: curling proxy /api/ output 08/24/23 12:49:06.825 +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 16:45:07.004: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 12:49:06.848: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "services-2741" for this suite. 07/29/23 16:45:07.022 +STEP: Destroying namespace "kubectl-4046" for this suite. 
08/24/23 12:49:06.857 ------------------------------ -• [SLOW TEST] [6.905 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should be able to change the type from ExternalName to ClusterIP [Conformance] - test/e2e/network/service.go:1438 +• [0.202 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Proxy server + test/e2e/kubectl/kubectl.go:1780 + should support proxy with --port 0 [Conformance] + test/e2e/kubectl/kubectl.go:1787 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:45:00.143 - Jul 29 16:45:00.144: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 16:45:00.146 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:00.177 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:00.194 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 12:49:06.666 + Aug 24 12:49:06.666: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:49:06.669 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:06.702 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:06.712 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should be able to change the type from ExternalName to ClusterIP [Conformance] - test/e2e/network/service.go:1438 - STEP: creating a service externalname-service with the type=ExternalName in namespace services-2741 07/29/23 16:45:00.208 - STEP: changing the ExternalName service to type=ClusterIP 07/29/23 16:45:00.224 - STEP: creating replication controller externalname-service in namespace services-2741 07/29/23 16:45:00.255 - I0729 16:45:00.271137 13 runners.go:193] Created replication controller with name: externalname-service, namespace: services-2741, replica count: 2 - I0729 16:45:03.322779 13 runners.go:193] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady - Jul 29 16:45:03.323: INFO: Creating new exec pod - Jul 29 16:45:03.335: INFO: Waiting up to 5m0s for pod "execpodhv2mz" in namespace "services-2741" to be "running" - Jul 29 16:45:03.357: INFO: Pod "execpodhv2mz": Phase="Pending", Reason="", readiness=false. Elapsed: 22.053062ms - Jul 29 16:45:05.369: INFO: Pod "execpodhv2mz": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.034186241s - Jul 29 16:45:05.369: INFO: Pod "execpodhv2mz" satisfied condition "running" - Jul 29 16:45:06.371: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-2741 exec execpodhv2mz -- /bin/sh -x -c nc -v -z -w 2 externalname-service 80' - Jul 29 16:45:06.694: INFO: stderr: "+ nc -v -z -w 2 externalname-service 80\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" - Jul 29 16:45:06.695: INFO: stdout: "" - Jul 29 16:45:06.695: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-2741 exec execpodhv2mz -- /bin/sh -x -c nc -v -z -w 2 10.233.44.45 80' - Jul 29 16:45:06.957: INFO: stderr: "+ nc -v -z -w 2 10.233.44.45 80\nConnection to 10.233.44.45 80 port [tcp/http] succeeded!\n" - Jul 29 16:45:06.957: INFO: stdout: "" - Jul 29 16:45:06.957: INFO: Cleaning up the ExternalName to ClusterIP test service - [AfterEach] [sig-network] Services + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [It] should support proxy with --port 0 [Conformance] + test/e2e/kubectl/kubectl.go:1787 + STEP: starting the proxy server 08/24/23 12:49:06.722 + Aug 24 12:49:06.724: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-4046 proxy -p 0 --disable-filter' + STEP: curling proxy /api/ output 08/24/23 12:49:06.825 + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 16:45:07.004: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 12:49:06.848: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "services-2741" for this suite. 07/29/23 16:45:07.022 + STEP: Destroying namespace "kubectl-4046" for this suite. 
08/24/23 12:49:06.857 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSS +SSSSSSSSSSSS ------------------------------ -[sig-instrumentation] Events API - should ensure that an event can be fetched, patched, deleted, and listed [Conformance] - test/e2e/instrumentation/events.go:98 -[BeforeEach] [sig-instrumentation] Events API +[sig-storage] ConfigMap + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:74 +[BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:45:07.05 -Jul 29 16:45:07.050: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename events 07/29/23 16:45:07.052 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:07.094 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:07.102 -[BeforeEach] [sig-instrumentation] Events API +STEP: Creating a kubernetes client 08/24/23 12:49:06.87 +Aug 24 12:49:06.870: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 12:49:06.872 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:06.902 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:06.908 +[BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-instrumentation] Events API - test/e2e/instrumentation/events.go:84 -[It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] - test/e2e/instrumentation/events.go:98 -STEP: creating a test event 07/29/23 16:45:07.112 -STEP: listing events in all namespaces 07/29/23 16:45:07.129 -STEP: listing events in test namespace 07/29/23 16:45:07.139 -STEP: listing events with field selection filtering on source 07/29/23 16:45:07.146 -STEP: listing events with field selection filtering on reportingController 07/29/23 16:45:07.154 -STEP: getting the test event 07/29/23 16:45:07.161 -STEP: patching the test event 07/29/23 16:45:07.172 -STEP: getting the test event 07/29/23 16:45:07.204 -STEP: updating the test event 07/29/23 16:45:07.214 -STEP: getting the test event 07/29/23 16:45:07.234 -STEP: deleting the test event 07/29/23 16:45:07.245 -STEP: listing events in all namespaces 07/29/23 16:45:07.269 -STEP: listing events in test namespace 07/29/23 16:45:07.288 -[AfterEach] [sig-instrumentation] Events API +[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:74 +STEP: Creating configMap with name configmap-test-volume-4ebc090b-bbf0-4080-b312-ae83bc9120d3 08/24/23 12:49:06.913 +STEP: Creating a pod to test consume configMaps 08/24/23 12:49:06.922 +Aug 24 12:49:06.935: INFO: Waiting up to 5m0s for pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f" in namespace "configmap-9502" to be "Succeeded or Failed" +Aug 24 12:49:06.946: INFO: Pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f": Phase="Pending", Reason="", readiness=false. Elapsed: 11.214007ms +Aug 24 12:49:08.954: INFO: Pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018469689s +Aug 24 12:49:10.952: INFO: Pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017349187s +STEP: Saw pod success 08/24/23 12:49:10.953 +Aug 24 12:49:10.953: INFO: Pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f" satisfied condition "Succeeded or Failed" +Aug 24 12:49:10.960: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f container agnhost-container: +STEP: delete the pod 08/24/23 12:49:10.974 +Aug 24 12:49:11.001: INFO: Waiting for pod pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f to disappear +Aug 24 12:49:11.006: INFO: Pod pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f no longer exists +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:45:07.306: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-instrumentation] Events API +Aug 24 12:49:11.006: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-instrumentation] Events API +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-instrumentation] Events API +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "events-4294" for this suite. 07/29/23 16:45:07.339 +STEP: Destroying namespace "configmap-9502" for this suite. 08/24/23 12:49:11.018 ------------------------------ -• [0.309 seconds] -[sig-instrumentation] Events API -test/e2e/instrumentation/common/framework.go:23 - should ensure that an event can be fetched, patched, deleted, and listed [Conformance] - test/e2e/instrumentation/events.go:98 +• [4.190 seconds] +[sig-storage] ConfigMap +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:74 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-instrumentation] Events API + [BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:45:07.05 - Jul 29 16:45:07.050: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename events 07/29/23 16:45:07.052 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:07.094 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:07.102 - [BeforeEach] [sig-instrumentation] Events API + STEP: Creating a kubernetes client 08/24/23 12:49:06.87 + Aug 24 12:49:06.870: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 12:49:06.872 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:06.902 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:06.908 + [BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-instrumentation] Events API - test/e2e/instrumentation/events.go:84 - [It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] - test/e2e/instrumentation/events.go:98 - STEP: creating a test event 07/29/23 16:45:07.112 - STEP: listing events in all namespaces 07/29/23 16:45:07.129 - STEP: listing events in test namespace 07/29/23 16:45:07.139 - STEP: listing events with field selection filtering on source 07/29/23 16:45:07.146 - STEP: listing events with field selection filtering on 
reportingController 07/29/23 16:45:07.154 - STEP: getting the test event 07/29/23 16:45:07.161 - STEP: patching the test event 07/29/23 16:45:07.172 - STEP: getting the test event 07/29/23 16:45:07.204 - STEP: updating the test event 07/29/23 16:45:07.214 - STEP: getting the test event 07/29/23 16:45:07.234 - STEP: deleting the test event 07/29/23 16:45:07.245 - STEP: listing events in all namespaces 07/29/23 16:45:07.269 - STEP: listing events in test namespace 07/29/23 16:45:07.288 - [AfterEach] [sig-instrumentation] Events API + [It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:74 + STEP: Creating configMap with name configmap-test-volume-4ebc090b-bbf0-4080-b312-ae83bc9120d3 08/24/23 12:49:06.913 + STEP: Creating a pod to test consume configMaps 08/24/23 12:49:06.922 + Aug 24 12:49:06.935: INFO: Waiting up to 5m0s for pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f" in namespace "configmap-9502" to be "Succeeded or Failed" + Aug 24 12:49:06.946: INFO: Pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f": Phase="Pending", Reason="", readiness=false. Elapsed: 11.214007ms + Aug 24 12:49:08.954: INFO: Pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018469689s + Aug 24 12:49:10.952: INFO: Pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017349187s + STEP: Saw pod success 08/24/23 12:49:10.953 + Aug 24 12:49:10.953: INFO: Pod "pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f" satisfied condition "Succeeded or Failed" + Aug 24 12:49:10.960: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f container agnhost-container: + STEP: delete the pod 08/24/23 12:49:10.974 + Aug 24 12:49:11.001: INFO: Waiting for pod pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f to disappear + Aug 24 12:49:11.006: INFO: Pod pod-configmaps-d88424e8-d46f-4c36-9da2-c3f4424e485f no longer exists + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:45:07.306: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-instrumentation] Events API + Aug 24 12:49:11.006: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-instrumentation] Events API + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-instrumentation] Events API + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "events-4294" for this suite. 07/29/23 16:45:07.339 + STEP: Destroying namespace "configmap-9502" for this suite. 
08/24/23 12:49:11.018 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSS ------------------------------ -[sig-api-machinery] Garbage collector - should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] - test/e2e/apimachinery/garbage_collector.go:735 -[BeforeEach] [sig-api-machinery] Garbage collector +[sig-api-machinery] Watchers + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + test/e2e/apimachinery/watch.go:257 +[BeforeEach] [sig-api-machinery] Watchers set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:45:07.36 -Jul 29 16:45:07.360: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename gc 07/29/23 16:45:07.362 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:07.409 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:07.423 -[BeforeEach] [sig-api-machinery] Garbage collector +STEP: Creating a kubernetes client 08/24/23 12:49:11.061 +Aug 24 12:49:11.061: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename watch 08/24/23 12:49:11.063 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:11.089 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:11.095 +[BeforeEach] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:31 -[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] - test/e2e/apimachinery/garbage_collector.go:735 -STEP: create the rc1 07/29/23 16:45:07.443 -STEP: create the rc2 07/29/23 16:45:07.453 -STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well 07/29/23 16:45:12.496 -STEP: delete the rc simpletest-rc-to-be-deleted 07/29/23 16:45:18.616 -STEP: wait for the rc to be deleted 07/29/23 16:45:18.754 -Jul 29 16:45:24.690: INFO: 93 pods remaining -Jul 29 16:45:24.690: INFO: 68 pods has nil DeletionTimestamp -Jul 29 16:45:24.690: INFO: -Jul 29 16:45:28.949: INFO: 82 pods remaining -Jul 29 16:45:28.949: INFO: 50 pods has nil DeletionTimestamp -Jul 29 16:45:28.949: INFO: -Jul 29 16:45:33.863: INFO: 50 pods remaining -Jul 29 16:45:33.864: INFO: 50 pods has nil DeletionTimestamp -Jul 29 16:45:33.864: INFO: -STEP: Gathering metrics 07/29/23 16:45:38.788 -Jul 29 16:45:38.860: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" -Jul 29 16:45:38.874: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 13.51928ms -Jul 29 16:45:38.874: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) -Jul 29 16:45:38.875: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" -Jul 29 16:45:39.015: INFO: For apiserver_request_total: -For apiserver_request_latency_seconds: -For apiserver_init_events_total: -For garbage_collector_attempt_to_delete_queue_latency: -For garbage_collector_attempt_to_delete_work_duration: -For garbage_collector_attempt_to_orphan_queue_latency: -For garbage_collector_attempt_to_orphan_work_duration: -For garbage_collector_dirty_processing_latency_microseconds: -For garbage_collector_event_processing_latency_microseconds: -For garbage_collector_graph_changes_queue_latency: -For garbage_collector_graph_changes_work_duration: -For garbage_collector_orphan_processing_latency_microseconds: -For namespace_queue_latency: -For namespace_queue_latency_sum: -For namespace_queue_latency_count: -For namespace_retries: -For namespace_work_duration: -For namespace_work_duration_sum: -For namespace_work_duration_count: -For function_duration_seconds: -For errors_total: -For evicted_pods_total: +[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + test/e2e/apimachinery/watch.go:257 +STEP: creating a watch on configmaps with a certain label 08/24/23 12:49:11.101 +STEP: creating a new configmap 08/24/23 12:49:11.104 +STEP: modifying the configmap once 08/24/23 12:49:11.113 +STEP: changing the label value of the configmap 08/24/23 12:49:11.128 +STEP: Expecting to observe a delete notification for the watched object 08/24/23 12:49:11.142 +Aug 24 12:49:11.143: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26793 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 12:49:11.144: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26794 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:11 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 12:49:11.145: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26795 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:11 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying the configmap a second time 08/24/23 12:49:11.145 +STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements 08/24/23 12:49:11.165 +STEP: changing the label value of the configmap back 08/24/23 12:49:21.166 +STEP: modifying the configmap a third time 08/24/23 12:49:21.184 +STEP: deleting the configmap 08/24/23 12:49:21.201 +STEP: Expecting to observe an add 
notification for the watched object when the label value was restored 08/24/23 12:49:21.213 +Aug 24 12:49:21.213: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26850 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 12:49:21.214: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26851 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 12:49:21.215: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26852 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers + test/e2e/framework/node/init/init.go:32 +Aug 24 12:49:21.216: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Watchers + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-api-machinery] Watchers + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-api-machinery] Watchers + tear down framework | framework.go:193 +STEP: Destroying namespace "watch-1304" for this suite. 
08/24/23 12:49:21.232 +------------------------------ +• [SLOW TEST] [10.188 seconds] +[sig-api-machinery] Watchers +test/e2e/apimachinery/framework.go:23 + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + test/e2e/apimachinery/watch.go:257 -Jul 29 16:45:39.015: INFO: Deleting pod "simpletest-rc-to-be-deleted-24hkd" in namespace "gc-5484" -Jul 29 16:45:39.037: INFO: Deleting pod "simpletest-rc-to-be-deleted-2vv8d" in namespace "gc-5484" -Jul 29 16:45:39.120: INFO: Deleting pod "simpletest-rc-to-be-deleted-4fkvt" in namespace "gc-5484" -Jul 29 16:45:39.161: INFO: Deleting pod "simpletest-rc-to-be-deleted-4m6b2" in namespace "gc-5484" -Jul 29 16:45:39.220: INFO: Deleting pod "simpletest-rc-to-be-deleted-4qbdv" in namespace "gc-5484" -Jul 29 16:45:39.316: INFO: Deleting pod "simpletest-rc-to-be-deleted-4xqx2" in namespace "gc-5484" -Jul 29 16:45:39.378: INFO: Deleting pod "simpletest-rc-to-be-deleted-58crs" in namespace "gc-5484" -Jul 29 16:45:39.463: INFO: Deleting pod "simpletest-rc-to-be-deleted-594w2" in namespace "gc-5484" -Jul 29 16:45:39.524: INFO: Deleting pod "simpletest-rc-to-be-deleted-5dmwr" in namespace "gc-5484" -Jul 29 16:45:39.586: INFO: Deleting pod "simpletest-rc-to-be-deleted-5g2wr" in namespace "gc-5484" -Jul 29 16:45:39.639: INFO: Deleting pod "simpletest-rc-to-be-deleted-65phg" in namespace "gc-5484" -Jul 29 16:45:39.802: INFO: Deleting pod "simpletest-rc-to-be-deleted-66c7m" in namespace "gc-5484" -Jul 29 16:45:40.076: INFO: Deleting pod "simpletest-rc-to-be-deleted-69clj" in namespace "gc-5484" -Jul 29 16:45:40.416: INFO: Deleting pod "simpletest-rc-to-be-deleted-6bmhz" in namespace "gc-5484" -Jul 29 16:45:40.464: INFO: Deleting pod "simpletest-rc-to-be-deleted-75l4w" in namespace "gc-5484" -Jul 29 16:45:40.657: INFO: Deleting pod "simpletest-rc-to-be-deleted-7x7jt" in namespace "gc-5484" -Jul 29 16:45:40.713: INFO: Deleting pod "simpletest-rc-to-be-deleted-7zvcr" in namespace "gc-5484" -Jul 29 16:45:40.824: INFO: Deleting pod "simpletest-rc-to-be-deleted-8blwb" in namespace "gc-5484" -Jul 29 16:45:40.861: INFO: Deleting pod "simpletest-rc-to-be-deleted-8kbvc" in namespace "gc-5484" -Jul 29 16:45:40.927: INFO: Deleting pod "simpletest-rc-to-be-deleted-8nb6v" in namespace "gc-5484" -Jul 29 16:45:40.989: INFO: Deleting pod "simpletest-rc-to-be-deleted-8qlds" in namespace "gc-5484" -Jul 29 16:45:41.042: INFO: Deleting pod "simpletest-rc-to-be-deleted-8wkm4" in namespace "gc-5484" -Jul 29 16:45:41.110: INFO: Deleting pod "simpletest-rc-to-be-deleted-9pskp" in namespace "gc-5484" -Jul 29 16:45:41.195: INFO: Deleting pod "simpletest-rc-to-be-deleted-9pv5f" in namespace "gc-5484" -Jul 29 16:45:41.231: INFO: Deleting pod "simpletest-rc-to-be-deleted-9qcp2" in namespace "gc-5484" -Jul 29 16:45:41.265: INFO: Deleting pod "simpletest-rc-to-be-deleted-9rrsv" in namespace "gc-5484" -Jul 29 16:45:41.330: INFO: Deleting pod "simpletest-rc-to-be-deleted-b2qlw" in namespace "gc-5484" -Jul 29 16:45:41.426: INFO: Deleting pod "simpletest-rc-to-be-deleted-b6k5f" in namespace "gc-5484" -Jul 29 16:45:41.477: INFO: Deleting pod "simpletest-rc-to-be-deleted-btr2q" in namespace "gc-5484" -Jul 29 16:45:41.558: INFO: Deleting pod "simpletest-rc-to-be-deleted-bv576" in namespace "gc-5484" -Jul 29 16:45:41.634: INFO: Deleting pod "simpletest-rc-to-be-deleted-c2pk6" in namespace "gc-5484" -Jul 29 16:45:41.677: INFO: Deleting pod "simpletest-rc-to-be-deleted-chlmr" in namespace "gc-5484" -Jul 29 16:45:41.773: INFO: Deleting pod 
"simpletest-rc-to-be-deleted-d4jt8" in namespace "gc-5484" -Jul 29 16:45:41.881: INFO: Deleting pod "simpletest-rc-to-be-deleted-d4z25" in namespace "gc-5484" -Jul 29 16:45:41.988: INFO: Deleting pod "simpletest-rc-to-be-deleted-dfcpk" in namespace "gc-5484" -Jul 29 16:45:42.065: INFO: Deleting pod "simpletest-rc-to-be-deleted-dg5nl" in namespace "gc-5484" -Jul 29 16:45:42.109: INFO: Deleting pod "simpletest-rc-to-be-deleted-dhp9h" in namespace "gc-5484" -Jul 29 16:45:42.156: INFO: Deleting pod "simpletest-rc-to-be-deleted-dqnnn" in namespace "gc-5484" -Jul 29 16:45:42.198: INFO: Deleting pod "simpletest-rc-to-be-deleted-dvhrf" in namespace "gc-5484" -Jul 29 16:45:42.244: INFO: Deleting pod "simpletest-rc-to-be-deleted-flv5f" in namespace "gc-5484" -Jul 29 16:45:42.295: INFO: Deleting pod "simpletest-rc-to-be-deleted-fptn8" in namespace "gc-5484" -Jul 29 16:45:42.345: INFO: Deleting pod "simpletest-rc-to-be-deleted-fzgfq" in namespace "gc-5484" -Jul 29 16:45:42.404: INFO: Deleting pod "simpletest-rc-to-be-deleted-g5l5r" in namespace "gc-5484" -Jul 29 16:45:42.463: INFO: Deleting pod "simpletest-rc-to-be-deleted-g99f4" in namespace "gc-5484" -Jul 29 16:45:42.505: INFO: Deleting pod "simpletest-rc-to-be-deleted-h2lwz" in namespace "gc-5484" -Jul 29 16:45:42.593: INFO: Deleting pod "simpletest-rc-to-be-deleted-h5khq" in namespace "gc-5484" -Jul 29 16:45:42.643: INFO: Deleting pod "simpletest-rc-to-be-deleted-hdb5c" in namespace "gc-5484" -Jul 29 16:45:42.770: INFO: Deleting pod "simpletest-rc-to-be-deleted-hjghj" in namespace "gc-5484" -Jul 29 16:45:42.948: INFO: Deleting pod "simpletest-rc-to-be-deleted-j5vlx" in namespace "gc-5484" -Jul 29 16:45:42.981: INFO: Deleting pod "simpletest-rc-to-be-deleted-jf64r" in namespace "gc-5484" -[AfterEach] [sig-api-machinery] Garbage collector + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-api-machinery] Watchers + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:49:11.061 + Aug 24 12:49:11.061: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename watch 08/24/23 12:49:11.063 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:11.089 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:11.095 + [BeforeEach] [sig-api-machinery] Watchers + test/e2e/framework/metrics/init/init.go:31 + [It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + test/e2e/apimachinery/watch.go:257 + STEP: creating a watch on configmaps with a certain label 08/24/23 12:49:11.101 + STEP: creating a new configmap 08/24/23 12:49:11.104 + STEP: modifying the configmap once 08/24/23 12:49:11.113 + STEP: changing the label value of the configmap 08/24/23 12:49:11.128 + STEP: Expecting to observe a delete notification for the watched object 08/24/23 12:49:11.142 + Aug 24 12:49:11.143: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26793 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:11 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 12:49:11.144: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26794 0 2023-08-24 12:49:11 +0000 UTC 
map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:11 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 12:49:11.145: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26795 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:11 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} + STEP: modifying the configmap a second time 08/24/23 12:49:11.145 + STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements 08/24/23 12:49:11.165 + STEP: changing the label value of the configmap back 08/24/23 12:49:21.166 + STEP: modifying the configmap a third time 08/24/23 12:49:21.184 + STEP: deleting the configmap 08/24/23 12:49:21.201 + STEP: Expecting to observe an add notification for the watched object when the label value was restored 08/24/23 12:49:21.213 + Aug 24 12:49:21.213: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26850 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 12:49:21.214: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26851 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 12:49:21.215: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-1304 13447dbf-1e5f-4024-b34e-def135439317 26852 0 2023-08-24 12:49:11 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-08-24 12:49:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} + [AfterEach] [sig-api-machinery] Watchers + test/e2e/framework/node/init/init.go:32 + Aug 24 12:49:21.216: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Watchers + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-api-machinery] Watchers + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-api-machinery] Watchers + tear down framework | framework.go:193 + STEP: Destroying namespace "watch-1304" for this suite. 
08/24/23 12:49:21.232 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide container's cpu request [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:221 +[BeforeEach] [sig-storage] Downward API volume + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:49:21.269 +Aug 24 12:49:21.269: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:49:21.271 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:21.305 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:21.312 +[BeforeEach] [sig-storage] Downward API volume + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 +[It] should provide container's cpu request [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:221 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:49:21.321 +Aug 24 12:49:21.340: INFO: Waiting up to 5m0s for pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1" in namespace "downward-api-5862" to be "Succeeded or Failed" +Aug 24 12:49:21.350: INFO: Pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1": Phase="Pending", Reason="", readiness=false. Elapsed: 9.592018ms +Aug 24 12:49:23.359: INFO: Pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018358522s +Aug 24 12:49:25.361: INFO: Pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020640331s +STEP: Saw pod success 08/24/23 12:49:25.362 +Aug 24 12:49:25.362: INFO: Pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1" satisfied condition "Succeeded or Failed" +Aug 24 12:49:25.371: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1 container client-container: +STEP: delete the pod 08/24/23 12:49:25.385 +Aug 24 12:49:25.409: INFO: Waiting for pod downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1 to disappear +Aug 24 12:49:25.417: INFO: Pod downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1 no longer exists +[AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 -Jul 29 16:45:43.022: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +Aug 24 12:49:25.417: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 -STEP: Destroying namespace "gc-5484" for this suite. 07/29/23 16:45:43.043 +STEP: Destroying namespace "downward-api-5862" for this suite. 
08/24/23 12:49:25.428 ------------------------------ -• [SLOW TEST] [35.793 seconds] -[sig-api-machinery] Garbage collector -test/e2e/apimachinery/framework.go:23 - should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] - test/e2e/apimachinery/garbage_collector.go:735 +• [4.170 seconds] +[sig-storage] Downward API volume +test/e2e/common/storage/framework.go:23 + should provide container's cpu request [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:221 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Garbage collector + [BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:45:07.36 - Jul 29 16:45:07.360: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename gc 07/29/23 16:45:07.362 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:07.409 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:07.423 - [BeforeEach] [sig-api-machinery] Garbage collector + STEP: Creating a kubernetes client 08/24/23 12:49:21.269 + Aug 24 12:49:21.269: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 12:49:21.271 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:21.305 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:21.312 + [BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 - [It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] - test/e2e/apimachinery/garbage_collector.go:735 - STEP: create the rc1 07/29/23 16:45:07.443 - STEP: create the rc2 07/29/23 16:45:07.453 - STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well 07/29/23 16:45:12.496 - STEP: delete the rc simpletest-rc-to-be-deleted 07/29/23 16:45:18.616 - STEP: wait for the rc to be deleted 07/29/23 16:45:18.754 - Jul 29 16:45:24.690: INFO: 93 pods remaining - Jul 29 16:45:24.690: INFO: 68 pods has nil DeletionTimestamp - Jul 29 16:45:24.690: INFO: - Jul 29 16:45:28.949: INFO: 82 pods remaining - Jul 29 16:45:28.949: INFO: 50 pods has nil DeletionTimestamp - Jul 29 16:45:28.949: INFO: - Jul 29 16:45:33.863: INFO: 50 pods remaining - Jul 29 16:45:33.864: INFO: 50 pods has nil DeletionTimestamp - Jul 29 16:45:33.864: INFO: - STEP: Gathering metrics 07/29/23 16:45:38.788 - Jul 29 16:45:38.860: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" - Jul 29 16:45:38.874: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 13.51928ms - Jul 29 16:45:38.874: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) - Jul 29 16:45:38.875: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" - Jul 29 16:45:39.015: INFO: For apiserver_request_total: - For apiserver_request_latency_seconds: - For apiserver_init_events_total: - For garbage_collector_attempt_to_delete_queue_latency: - For garbage_collector_attempt_to_delete_work_duration: - For garbage_collector_attempt_to_orphan_queue_latency: - For garbage_collector_attempt_to_orphan_work_duration: - For garbage_collector_dirty_processing_latency_microseconds: - For garbage_collector_event_processing_latency_microseconds: - For garbage_collector_graph_changes_queue_latency: - For garbage_collector_graph_changes_work_duration: - For garbage_collector_orphan_processing_latency_microseconds: - For namespace_queue_latency: - For namespace_queue_latency_sum: - For namespace_queue_latency_count: - For namespace_retries: - For namespace_work_duration: - For namespace_work_duration_sum: - For namespace_work_duration_count: - For function_duration_seconds: - For errors_total: - For evicted_pods_total: - - Jul 29 16:45:39.015: INFO: Deleting pod "simpletest-rc-to-be-deleted-24hkd" in namespace "gc-5484" - Jul 29 16:45:39.037: INFO: Deleting pod "simpletest-rc-to-be-deleted-2vv8d" in namespace "gc-5484" - Jul 29 16:45:39.120: INFO: Deleting pod "simpletest-rc-to-be-deleted-4fkvt" in namespace "gc-5484" - Jul 29 16:45:39.161: INFO: Deleting pod "simpletest-rc-to-be-deleted-4m6b2" in namespace "gc-5484" - Jul 29 16:45:39.220: INFO: Deleting pod "simpletest-rc-to-be-deleted-4qbdv" in namespace "gc-5484" - Jul 29 16:45:39.316: INFO: Deleting pod "simpletest-rc-to-be-deleted-4xqx2" in namespace "gc-5484" - Jul 29 16:45:39.378: INFO: Deleting pod "simpletest-rc-to-be-deleted-58crs" in namespace "gc-5484" - Jul 29 16:45:39.463: INFO: Deleting pod "simpletest-rc-to-be-deleted-594w2" in namespace "gc-5484" - Jul 29 16:45:39.524: INFO: Deleting pod "simpletest-rc-to-be-deleted-5dmwr" in namespace "gc-5484" - Jul 29 16:45:39.586: INFO: Deleting pod "simpletest-rc-to-be-deleted-5g2wr" in namespace "gc-5484" - Jul 29 16:45:39.639: INFO: Deleting pod "simpletest-rc-to-be-deleted-65phg" in namespace "gc-5484" - Jul 29 16:45:39.802: INFO: Deleting pod "simpletest-rc-to-be-deleted-66c7m" in namespace "gc-5484" - Jul 29 16:45:40.076: INFO: Deleting pod "simpletest-rc-to-be-deleted-69clj" in namespace "gc-5484" - Jul 29 16:45:40.416: INFO: Deleting pod "simpletest-rc-to-be-deleted-6bmhz" in namespace "gc-5484" - Jul 29 16:45:40.464: INFO: Deleting pod "simpletest-rc-to-be-deleted-75l4w" in namespace "gc-5484" - Jul 29 16:45:40.657: INFO: Deleting pod "simpletest-rc-to-be-deleted-7x7jt" in namespace "gc-5484" - Jul 29 16:45:40.713: INFO: Deleting pod "simpletest-rc-to-be-deleted-7zvcr" in namespace "gc-5484" - Jul 29 16:45:40.824: INFO: Deleting pod "simpletest-rc-to-be-deleted-8blwb" in namespace "gc-5484" - Jul 29 16:45:40.861: INFO: Deleting pod "simpletest-rc-to-be-deleted-8kbvc" in namespace "gc-5484" - Jul 29 16:45:40.927: INFO: Deleting pod "simpletest-rc-to-be-deleted-8nb6v" in namespace "gc-5484" - Jul 29 16:45:40.989: INFO: Deleting pod "simpletest-rc-to-be-deleted-8qlds" in namespace "gc-5484" - Jul 29 16:45:41.042: INFO: Deleting pod "simpletest-rc-to-be-deleted-8wkm4" in namespace "gc-5484" - Jul 29 16:45:41.110: INFO: Deleting pod "simpletest-rc-to-be-deleted-9pskp" in namespace "gc-5484" - Jul 29 
16:45:41.195: INFO: Deleting pod "simpletest-rc-to-be-deleted-9pv5f" in namespace "gc-5484" - Jul 29 16:45:41.231: INFO: Deleting pod "simpletest-rc-to-be-deleted-9qcp2" in namespace "gc-5484" - Jul 29 16:45:41.265: INFO: Deleting pod "simpletest-rc-to-be-deleted-9rrsv" in namespace "gc-5484" - Jul 29 16:45:41.330: INFO: Deleting pod "simpletest-rc-to-be-deleted-b2qlw" in namespace "gc-5484" - Jul 29 16:45:41.426: INFO: Deleting pod "simpletest-rc-to-be-deleted-b6k5f" in namespace "gc-5484" - Jul 29 16:45:41.477: INFO: Deleting pod "simpletest-rc-to-be-deleted-btr2q" in namespace "gc-5484" - Jul 29 16:45:41.558: INFO: Deleting pod "simpletest-rc-to-be-deleted-bv576" in namespace "gc-5484" - Jul 29 16:45:41.634: INFO: Deleting pod "simpletest-rc-to-be-deleted-c2pk6" in namespace "gc-5484" - Jul 29 16:45:41.677: INFO: Deleting pod "simpletest-rc-to-be-deleted-chlmr" in namespace "gc-5484" - Jul 29 16:45:41.773: INFO: Deleting pod "simpletest-rc-to-be-deleted-d4jt8" in namespace "gc-5484" - Jul 29 16:45:41.881: INFO: Deleting pod "simpletest-rc-to-be-deleted-d4z25" in namespace "gc-5484" - Jul 29 16:45:41.988: INFO: Deleting pod "simpletest-rc-to-be-deleted-dfcpk" in namespace "gc-5484" - Jul 29 16:45:42.065: INFO: Deleting pod "simpletest-rc-to-be-deleted-dg5nl" in namespace "gc-5484" - Jul 29 16:45:42.109: INFO: Deleting pod "simpletest-rc-to-be-deleted-dhp9h" in namespace "gc-5484" - Jul 29 16:45:42.156: INFO: Deleting pod "simpletest-rc-to-be-deleted-dqnnn" in namespace "gc-5484" - Jul 29 16:45:42.198: INFO: Deleting pod "simpletest-rc-to-be-deleted-dvhrf" in namespace "gc-5484" - Jul 29 16:45:42.244: INFO: Deleting pod "simpletest-rc-to-be-deleted-flv5f" in namespace "gc-5484" - Jul 29 16:45:42.295: INFO: Deleting pod "simpletest-rc-to-be-deleted-fptn8" in namespace "gc-5484" - Jul 29 16:45:42.345: INFO: Deleting pod "simpletest-rc-to-be-deleted-fzgfq" in namespace "gc-5484" - Jul 29 16:45:42.404: INFO: Deleting pod "simpletest-rc-to-be-deleted-g5l5r" in namespace "gc-5484" - Jul 29 16:45:42.463: INFO: Deleting pod "simpletest-rc-to-be-deleted-g99f4" in namespace "gc-5484" - Jul 29 16:45:42.505: INFO: Deleting pod "simpletest-rc-to-be-deleted-h2lwz" in namespace "gc-5484" - Jul 29 16:45:42.593: INFO: Deleting pod "simpletest-rc-to-be-deleted-h5khq" in namespace "gc-5484" - Jul 29 16:45:42.643: INFO: Deleting pod "simpletest-rc-to-be-deleted-hdb5c" in namespace "gc-5484" - Jul 29 16:45:42.770: INFO: Deleting pod "simpletest-rc-to-be-deleted-hjghj" in namespace "gc-5484" - Jul 29 16:45:42.948: INFO: Deleting pod "simpletest-rc-to-be-deleted-j5vlx" in namespace "gc-5484" - Jul 29 16:45:42.981: INFO: Deleting pod "simpletest-rc-to-be-deleted-jf64r" in namespace "gc-5484" - [AfterEach] [sig-api-machinery] Garbage collector + [BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 + [It] should provide container's cpu request [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:221 + STEP: Creating a pod to test downward API volume plugin 08/24/23 12:49:21.321 + Aug 24 12:49:21.340: INFO: Waiting up to 5m0s for pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1" in namespace "downward-api-5862" to be "Succeeded or Failed" + Aug 24 12:49:21.350: INFO: Pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1": Phase="Pending", Reason="", readiness=false. Elapsed: 9.592018ms + Aug 24 12:49:23.359: INFO: Pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.018358522s + Aug 24 12:49:25.361: INFO: Pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020640331s + STEP: Saw pod success 08/24/23 12:49:25.362 + Aug 24 12:49:25.362: INFO: Pod "downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1" satisfied condition "Succeeded or Failed" + Aug 24 12:49:25.371: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1 container client-container: + STEP: delete the pod 08/24/23 12:49:25.385 + Aug 24 12:49:25.409: INFO: Waiting for pod downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1 to disappear + Aug 24 12:49:25.417: INFO: Pod downwardapi-volume-1b073102-14d0-488f-97da-2d0fdd8620e1 no longer exists + [AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 - Jul 29 16:45:43.022: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + Aug 24 12:49:25.417: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 - STEP: Destroying namespace "gc-5484" for this suite. 07/29/23 16:45:43.043 + STEP: Destroying namespace "downward-api-5862" for this suite. 08/24/23 12:49:25.428 << End Captured GinkgoWriter Output ------------------------------ -[sig-node] Variable Expansion - should allow composing env vars into new env vars [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:44 -[BeforeEach] [sig-node] Variable Expansion +SSSSS +------------------------------ +[sig-auth] Certificates API [Privileged:ClusterAdmin] + should support CSR API operations [Conformance] + test/e2e/auth/certificates.go:200 +[BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:45:43.156 -Jul 29 16:45:43.156: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename var-expansion 07/29/23 16:45:43.159 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:43.198 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:43.208 -[BeforeEach] [sig-node] Variable Expansion +STEP: Creating a kubernetes client 08/24/23 12:49:25.44 +Aug 24 12:49:25.441: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename certificates 08/24/23 12:49:25.444 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:25.468 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:25.473 +[BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should allow composing env vars into new env vars [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:44 -STEP: Creating a pod to test env composition 07/29/23 16:45:43.213 -Jul 29 16:45:43.248: INFO: Waiting up to 5m0s for pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef" in namespace "var-expansion-8367" to be "Succeeded or Failed" -Jul 
29 16:45:43.269: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef": Phase="Pending", Reason="", readiness=false. Elapsed: 21.333865ms -Jul 29 16:45:45.276: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027821778s -Jul 29 16:45:47.277: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef": Phase="Pending", Reason="", readiness=false. Elapsed: 4.029327433s -Jul 29 16:45:49.279: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.031304798s -STEP: Saw pod success 07/29/23 16:45:49.279 -Jul 29 16:45:49.280: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef" satisfied condition "Succeeded or Failed" -Jul 29 16:45:49.287: INFO: Trying to get logs from node wetuj3nuajog-3 pod var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef container dapi-container: -STEP: delete the pod 07/29/23 16:45:49.327 -Jul 29 16:45:49.355: INFO: Waiting for pod var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef to disappear -Jul 29 16:45:49.367: INFO: Pod var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef no longer exists -[AfterEach] [sig-node] Variable Expansion +[It] should support CSR API operations [Conformance] + test/e2e/auth/certificates.go:200 +STEP: getting /apis 08/24/23 12:49:27.724 +STEP: getting /apis/certificates.k8s.io 08/24/23 12:49:27.729 +STEP: getting /apis/certificates.k8s.io/v1 08/24/23 12:49:27.731 +STEP: creating 08/24/23 12:49:27.733 +STEP: getting 08/24/23 12:49:27.762 +STEP: listing 08/24/23 12:49:27.768 +STEP: watching 08/24/23 12:49:27.774 +Aug 24 12:49:27.775: INFO: starting watch +STEP: patching 08/24/23 12:49:27.776 +STEP: updating 08/24/23 12:49:27.789 +Aug 24 12:49:27.798: INFO: waiting for watch events with expected annotations +Aug 24 12:49:27.798: INFO: saw patched and updated annotations +STEP: getting /approval 08/24/23 12:49:27.798 +STEP: patching /approval 08/24/23 12:49:27.806 +STEP: updating /approval 08/24/23 12:49:27.826 +STEP: getting /status 08/24/23 12:49:27.841 +STEP: patching /status 08/24/23 12:49:27.846 +STEP: updating /status 08/24/23 12:49:27.861 +STEP: deleting 08/24/23 12:49:27.875 +STEP: deleting a collection 08/24/23 12:49:27.898 +[AfterEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:45:49.370: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Variable Expansion +Aug 24 12:49:27.923: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "var-expansion-8367" for this suite. 07/29/23 16:45:49.381 +STEP: Destroying namespace "certificates-2209" for this suite. 
08/24/23 12:49:27.953 ------------------------------ -• [SLOW TEST] [6.249 seconds] -[sig-node] Variable Expansion -test/e2e/common/node/framework.go:23 - should allow composing env vars into new env vars [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:44 +• [2.532 seconds] +[sig-auth] Certificates API [Privileged:ClusterAdmin] +test/e2e/auth/framework.go:23 + should support CSR API operations [Conformance] + test/e2e/auth/certificates.go:200 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Variable Expansion + [BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:45:43.156 - Jul 29 16:45:43.156: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename var-expansion 07/29/23 16:45:43.159 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:43.198 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:43.208 - [BeforeEach] [sig-node] Variable Expansion + STEP: Creating a kubernetes client 08/24/23 12:49:25.44 + Aug 24 12:49:25.441: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename certificates 08/24/23 12:49:25.444 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:25.468 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:25.473 + [BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should allow composing env vars into new env vars [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:44 - STEP: Creating a pod to test env composition 07/29/23 16:45:43.213 - Jul 29 16:45:43.248: INFO: Waiting up to 5m0s for pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef" in namespace "var-expansion-8367" to be "Succeeded or Failed" - Jul 29 16:45:43.269: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef": Phase="Pending", Reason="", readiness=false. Elapsed: 21.333865ms - Jul 29 16:45:45.276: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027821778s - Jul 29 16:45:47.277: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef": Phase="Pending", Reason="", readiness=false. Elapsed: 4.029327433s - Jul 29 16:45:49.279: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.031304798s - STEP: Saw pod success 07/29/23 16:45:49.279 - Jul 29 16:45:49.280: INFO: Pod "var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef" satisfied condition "Succeeded or Failed" - Jul 29 16:45:49.287: INFO: Trying to get logs from node wetuj3nuajog-3 pod var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef container dapi-container: - STEP: delete the pod 07/29/23 16:45:49.327 - Jul 29 16:45:49.355: INFO: Waiting for pod var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef to disappear - Jul 29 16:45:49.367: INFO: Pod var-expansion-b987eea4-cbdd-4329-af3d-6b7721a72eef no longer exists - [AfterEach] [sig-node] Variable Expansion + [It] should support CSR API operations [Conformance] + test/e2e/auth/certificates.go:200 + STEP: getting /apis 08/24/23 12:49:27.724 + STEP: getting /apis/certificates.k8s.io 08/24/23 12:49:27.729 + STEP: getting /apis/certificates.k8s.io/v1 08/24/23 12:49:27.731 + STEP: creating 08/24/23 12:49:27.733 + STEP: getting 08/24/23 12:49:27.762 + STEP: listing 08/24/23 12:49:27.768 + STEP: watching 08/24/23 12:49:27.774 + Aug 24 12:49:27.775: INFO: starting watch + STEP: patching 08/24/23 12:49:27.776 + STEP: updating 08/24/23 12:49:27.789 + Aug 24 12:49:27.798: INFO: waiting for watch events with expected annotations + Aug 24 12:49:27.798: INFO: saw patched and updated annotations + STEP: getting /approval 08/24/23 12:49:27.798 + STEP: patching /approval 08/24/23 12:49:27.806 + STEP: updating /approval 08/24/23 12:49:27.826 + STEP: getting /status 08/24/23 12:49:27.841 + STEP: patching /status 08/24/23 12:49:27.846 + STEP: updating /status 08/24/23 12:49:27.861 + STEP: deleting 08/24/23 12:49:27.875 + STEP: deleting a collection 08/24/23 12:49:27.898 + [AfterEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:45:49.370: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Variable Expansion + Aug 24 12:49:27.923: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "var-expansion-8367" for this suite. 07/29/23 16:45:49.381 + STEP: Destroying namespace "certificates-2209" for this suite. 
08/24/23 12:49:27.953 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should serve a basic endpoint from pods [Conformance] - test/e2e/network/service.go:787 -[BeforeEach] [sig-network] Services +[sig-storage] Projected downwardAPI + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:261 +[BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:45:49.421 -Jul 29 16:45:49.422: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 16:45:49.424 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:49.465 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:49.472 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 12:49:27.976 +Aug 24 12:49:27.976: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:49:27.98 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:28.004 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:28.008 +[BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should serve a basic endpoint from pods [Conformance] - test/e2e/network/service.go:787 -STEP: creating service endpoint-test2 in namespace services-9182 07/29/23 16:45:49.479 -STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[] 07/29/23 16:45:49.502 -Jul 29 16:45:49.514: INFO: Failed go get Endpoints object: endpoints "endpoint-test2" not found -Jul 29 16:45:50.537: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[] -STEP: Creating pod pod1 in namespace services-9182 07/29/23 16:45:50.537 -Jul 29 16:45:50.559: INFO: Waiting up to 5m0s for pod "pod1" in namespace "services-9182" to be "running and ready" -Jul 29 16:45:50.573: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 13.073635ms -Jul 29 16:45:50.573: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:45:52.594: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.034421731s -Jul 29 16:45:52.594: INFO: The phase of Pod pod1 is Running (Ready = true) -Jul 29 16:45:52.594: INFO: Pod "pod1" satisfied condition "running and ready" -STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[pod1:[80]] 07/29/23 16:45:52.603 -Jul 29 16:45:52.625: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[pod1:[80]] -STEP: Checking if the Service forwards traffic to pod1 07/29/23 16:45:52.625 -Jul 29 16:45:52.626: INFO: Creating new exec pod -Jul 29 16:45:52.636: INFO: Waiting up to 5m0s for pod "execpodx7kfp" in namespace "services-9182" to be "running" -Jul 29 16:45:52.648: INFO: Pod "execpodx7kfp": Phase="Pending", Reason="", readiness=false. 
Elapsed: 12.753778ms -Jul 29 16:45:54.656: INFO: Pod "execpodx7kfp": Phase="Running", Reason="", readiness=true. Elapsed: 2.020006065s -Jul 29 16:45:54.656: INFO: Pod "execpodx7kfp" satisfied condition "running" -Jul 29 16:45:55.659: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' -Jul 29 16:45:55.977: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" -Jul 29 16:45:55.977: INFO: stdout: "" -Jul 29 16:45:55.978: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 10.233.10.218 80' -Jul 29 16:45:56.237: INFO: stderr: "+ nc -v -z -w 2 10.233.10.218 80\nConnection to 10.233.10.218 80 port [tcp/http] succeeded!\n" -Jul 29 16:45:56.237: INFO: stdout: "" -STEP: Creating pod pod2 in namespace services-9182 07/29/23 16:45:56.237 -Jul 29 16:45:56.254: INFO: Waiting up to 5m0s for pod "pod2" in namespace "services-9182" to be "running and ready" -Jul 29 16:45:56.264: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 9.891846ms -Jul 29 16:45:56.264: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:45:58.280: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02613039s -Jul 29 16:45:58.281: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:46:00.274: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. Elapsed: 4.020636911s -Jul 29 16:46:00.275: INFO: The phase of Pod pod2 is Running (Ready = true) -Jul 29 16:46:00.275: INFO: Pod "pod2" satisfied condition "running and ready" -STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[pod1:[80] pod2:[80]] 07/29/23 16:46:00.287 -Jul 29 16:46:00.326: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[pod1:[80] pod2:[80]] -STEP: Checking if the Service forwards traffic to pod1 and pod2 07/29/23 16:46:00.327 -Jul 29 16:46:01.329: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' -Jul 29 16:46:01.633: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" -Jul 29 16:46:01.633: INFO: stdout: "" -Jul 29 16:46:01.633: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 10.233.10.218 80' -Jul 29 16:46:01.858: INFO: stderr: "+ nc -v -z -w 2 10.233.10.218 80\nConnection to 10.233.10.218 80 port [tcp/http] succeeded!\n" -Jul 29 16:46:01.858: INFO: stdout: "" -STEP: Deleting pod pod1 in namespace services-9182 07/29/23 16:46:01.858 -STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[pod2:[80]] 07/29/23 16:46:01.887 -Jul 29 16:46:01.938: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[pod2:[80]] -STEP: Checking if the Service forwards traffic to pod2 07/29/23 16:46:01.938 -Jul 29 16:46:02.940: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 
endpoint-test2 80' -Jul 29 16:46:03.244: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" -Jul 29 16:46:03.244: INFO: stdout: "" -Jul 29 16:46:03.244: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 10.233.10.218 80' -Jul 29 16:46:03.514: INFO: stderr: "+ nc -v -z -w 2 10.233.10.218 80\nConnection to 10.233.10.218 80 port [tcp/http] succeeded!\n" -Jul 29 16:46:03.514: INFO: stdout: "" -STEP: Deleting pod pod2 in namespace services-9182 07/29/23 16:46:03.514 -STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[] 07/29/23 16:46:03.538 -Jul 29 16:46:03.574: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[] -[AfterEach] [sig-network] Services +[BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:261 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:49:28.015 +Aug 24 12:49:28.034: INFO: Waiting up to 5m0s for pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5" in namespace "projected-7286" to be "Succeeded or Failed" +Aug 24 12:49:28.088: INFO: Pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5": Phase="Pending", Reason="", readiness=false. Elapsed: 53.011687ms +Aug 24 12:49:30.101: INFO: Pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.066656554s +Aug 24 12:49:32.096: INFO: Pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.061652944s +STEP: Saw pod success 08/24/23 12:49:32.096 +Aug 24 12:49:32.097: INFO: Pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5" satisfied condition "Succeeded or Failed" +Aug 24 12:49:32.103: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5 container client-container: +STEP: delete the pod 08/24/23 12:49:32.115 +Aug 24 12:49:32.142: INFO: Waiting for pod downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5 to disappear +Aug 24 12:49:32.148: INFO: Pod downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 -Jul 29 16:46:03.625: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 12:49:32.148: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 -STEP: Destroying namespace "services-9182" for this suite. 07/29/23 16:46:03.637 +STEP: Destroying namespace "projected-7286" for this suite. 
08/24/23 12:49:32.159 ------------------------------ -• [SLOW TEST] [14.261 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should serve a basic endpoint from pods [Conformance] - test/e2e/network/service.go:787 +• [4.198 seconds] +[sig-storage] Projected downwardAPI +test/e2e/common/storage/framework.go:23 + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:261 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:45:49.421 - Jul 29 16:45:49.422: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 16:45:49.424 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:45:49.465 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:45:49.472 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 12:49:27.976 + Aug 24 12:49:27.976: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:49:27.98 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:28.004 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:28.008 + [BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should serve a basic endpoint from pods [Conformance] - test/e2e/network/service.go:787 - STEP: creating service endpoint-test2 in namespace services-9182 07/29/23 16:45:49.479 - STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[] 07/29/23 16:45:49.502 - Jul 29 16:45:49.514: INFO: Failed go get Endpoints object: endpoints "endpoint-test2" not found - Jul 29 16:45:50.537: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[] - STEP: Creating pod pod1 in namespace services-9182 07/29/23 16:45:50.537 - Jul 29 16:45:50.559: INFO: Waiting up to 5m0s for pod "pod1" in namespace "services-9182" to be "running and ready" - Jul 29 16:45:50.573: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 13.073635ms - Jul 29 16:45:50.573: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:45:52.594: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.034421731s - Jul 29 16:45:52.594: INFO: The phase of Pod pod1 is Running (Ready = true) - Jul 29 16:45:52.594: INFO: Pod "pod1" satisfied condition "running and ready" - STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[pod1:[80]] 07/29/23 16:45:52.603 - Jul 29 16:45:52.625: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[pod1:[80]] - STEP: Checking if the Service forwards traffic to pod1 07/29/23 16:45:52.625 - Jul 29 16:45:52.626: INFO: Creating new exec pod - Jul 29 16:45:52.636: INFO: Waiting up to 5m0s for pod "execpodx7kfp" in namespace "services-9182" to be "running" - Jul 29 16:45:52.648: INFO: Pod "execpodx7kfp": Phase="Pending", Reason="", readiness=false. 
Elapsed: 12.753778ms - Jul 29 16:45:54.656: INFO: Pod "execpodx7kfp": Phase="Running", Reason="", readiness=true. Elapsed: 2.020006065s - Jul 29 16:45:54.656: INFO: Pod "execpodx7kfp" satisfied condition "running" - Jul 29 16:45:55.659: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' - Jul 29 16:45:55.977: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" - Jul 29 16:45:55.977: INFO: stdout: "" - Jul 29 16:45:55.978: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 10.233.10.218 80' - Jul 29 16:45:56.237: INFO: stderr: "+ nc -v -z -w 2 10.233.10.218 80\nConnection to 10.233.10.218 80 port [tcp/http] succeeded!\n" - Jul 29 16:45:56.237: INFO: stdout: "" - STEP: Creating pod pod2 in namespace services-9182 07/29/23 16:45:56.237 - Jul 29 16:45:56.254: INFO: Waiting up to 5m0s for pod "pod2" in namespace "services-9182" to be "running and ready" - Jul 29 16:45:56.264: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 9.891846ms - Jul 29 16:45:56.264: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:45:58.280: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02613039s - Jul 29 16:45:58.281: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:46:00.274: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. Elapsed: 4.020636911s - Jul 29 16:46:00.275: INFO: The phase of Pod pod2 is Running (Ready = true) - Jul 29 16:46:00.275: INFO: Pod "pod2" satisfied condition "running and ready" - STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[pod1:[80] pod2:[80]] 07/29/23 16:46:00.287 - Jul 29 16:46:00.326: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[pod1:[80] pod2:[80]] - STEP: Checking if the Service forwards traffic to pod1 and pod2 07/29/23 16:46:00.327 - Jul 29 16:46:01.329: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' - Jul 29 16:46:01.633: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" - Jul 29 16:46:01.633: INFO: stdout: "" - Jul 29 16:46:01.633: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 10.233.10.218 80' - Jul 29 16:46:01.858: INFO: stderr: "+ nc -v -z -w 2 10.233.10.218 80\nConnection to 10.233.10.218 80 port [tcp/http] succeeded!\n" - Jul 29 16:46:01.858: INFO: stdout: "" - STEP: Deleting pod pod1 in namespace services-9182 07/29/23 16:46:01.858 - STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[pod2:[80]] 07/29/23 16:46:01.887 - Jul 29 16:46:01.938: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[pod2:[80]] - STEP: Checking if the Service forwards traffic to pod2 07/29/23 16:46:01.938 - Jul 29 16:46:02.940: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- 
/bin/sh -x -c nc -v -z -w 2 endpoint-test2 80' - Jul 29 16:46:03.244: INFO: stderr: "+ nc -v -z -w 2 endpoint-test2 80\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" - Jul 29 16:46:03.244: INFO: stdout: "" - Jul 29 16:46:03.244: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9182 exec execpodx7kfp -- /bin/sh -x -c nc -v -z -w 2 10.233.10.218 80' - Jul 29 16:46:03.514: INFO: stderr: "+ nc -v -z -w 2 10.233.10.218 80\nConnection to 10.233.10.218 80 port [tcp/http] succeeded!\n" - Jul 29 16:46:03.514: INFO: stdout: "" - STEP: Deleting pod pod2 in namespace services-9182 07/29/23 16:46:03.514 - STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-9182 to expose endpoints map[] 07/29/23 16:46:03.538 - Jul 29 16:46:03.574: INFO: successfully validated that service endpoint-test2 in namespace services-9182 exposes endpoints map[] - [AfterEach] [sig-network] Services + [BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 + [It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:261 + STEP: Creating a pod to test downward API volume plugin 08/24/23 12:49:28.015 + Aug 24 12:49:28.034: INFO: Waiting up to 5m0s for pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5" in namespace "projected-7286" to be "Succeeded or Failed" + Aug 24 12:49:28.088: INFO: Pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5": Phase="Pending", Reason="", readiness=false. Elapsed: 53.011687ms + Aug 24 12:49:30.101: INFO: Pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.066656554s + Aug 24 12:49:32.096: INFO: Pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.061652944s + STEP: Saw pod success 08/24/23 12:49:32.096 + Aug 24 12:49:32.097: INFO: Pod "downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5" satisfied condition "Succeeded or Failed" + Aug 24 12:49:32.103: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5 container client-container: + STEP: delete the pod 08/24/23 12:49:32.115 + Aug 24 12:49:32.142: INFO: Waiting for pod downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5 to disappear + Aug 24 12:49:32.148: INFO: Pod downwardapi-volume-264d7747-8f9a-4bf7-ab23-0d10542656c5 no longer exists + [AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 - Jul 29 16:46:03.625: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 12:49:32.148: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 - STEP: Destroying namespace "services-9182" for this suite. 07/29/23 16:46:03.637 + STEP: Destroying namespace "projected-7286" for this suite. 
08/24/23 12:49:32.159 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSS +SSSSSSSS ------------------------------ -[sig-node] Security Context When creating a pod with readOnlyRootFilesystem - should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:486 -[BeforeEach] [sig-node] Security Context +[sig-storage] Projected downwardAPI + should provide container's memory request [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:235 +[BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:46:03.691 -Jul 29 16:46:03.692: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename security-context-test 07/29/23 16:46:03.696 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:46:03.753 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:46:03.76 -[BeforeEach] [sig-node] Security Context +STEP: Creating a kubernetes client 08/24/23 12:49:32.178 +Aug 24 12:49:32.178: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:49:32.18 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:32.21 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:32.215 +[BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Security Context - test/e2e/common/node/security_context.go:50 -[It] should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:486 -Jul 29 16:46:03.799: INFO: Waiting up to 5m0s for pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df" in namespace "security-context-test-9911" to be "Succeeded or Failed" -Jul 29 16:46:03.816: INFO: Pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df": Phase="Pending", Reason="", readiness=false. Elapsed: 16.620976ms -Jul 29 16:46:05.827: INFO: Pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0278809s -Jul 29 16:46:07.828: INFO: Pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029031964s -Jul 29 16:46:07.828: INFO: Pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df" satisfied condition "Succeeded or Failed" -[AfterEach] [sig-node] Security Context +[BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 +[It] should provide container's memory request [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:235 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:49:32.22 +Aug 24 12:49:32.237: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191" in namespace "projected-5010" to be "Succeeded or Failed" +Aug 24 12:49:32.246: INFO: Pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191": Phase="Pending", Reason="", readiness=false. Elapsed: 8.803028ms +Aug 24 12:49:34.261: INFO: Pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.023307253s +Aug 24 12:49:36.253: INFO: Pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015999699s +STEP: Saw pod success 08/24/23 12:49:36.253 +Aug 24 12:49:36.254: INFO: Pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191" satisfied condition "Succeeded or Failed" +Aug 24 12:49:36.260: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191 container client-container: +STEP: delete the pod 08/24/23 12:49:36.269 +Aug 24 12:49:36.296: INFO: Waiting for pod downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191 to disappear +Aug 24 12:49:36.305: INFO: Pod downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 -Jul 29 16:46:07.829: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Security Context +Aug 24 12:49:36.306: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Security Context +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Security Context +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 -STEP: Destroying namespace "security-context-test-9911" for this suite. 07/29/23 16:46:07.84 +STEP: Destroying namespace "projected-5010" for this suite. 08/24/23 12:49:36.315 ------------------------------ -• [4.164 seconds] -[sig-node] Security Context -test/e2e/common/node/framework.go:23 - When creating a pod with readOnlyRootFilesystem - test/e2e/common/node/security_context.go:430 - should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:486 +• [4.153 seconds] +[sig-storage] Projected downwardAPI +test/e2e/common/storage/framework.go:23 + should provide container's memory request [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:235 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Security Context + [BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:46:03.691 - Jul 29 16:46:03.692: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename security-context-test 07/29/23 16:46:03.696 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:46:03.753 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:46:03.76 - [BeforeEach] [sig-node] Security Context + STEP: Creating a kubernetes client 08/24/23 12:49:32.178 + Aug 24 12:49:32.178: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:49:32.18 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:32.21 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:32.215 + [BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Security Context - test/e2e/common/node/security_context.go:50 - [It] should run the container with writable rootfs when readOnlyRootFilesystem=false 
[NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:486 - Jul 29 16:46:03.799: INFO: Waiting up to 5m0s for pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df" in namespace "security-context-test-9911" to be "Succeeded or Failed" - Jul 29 16:46:03.816: INFO: Pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df": Phase="Pending", Reason="", readiness=false. Elapsed: 16.620976ms - Jul 29 16:46:05.827: INFO: Pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0278809s - Jul 29 16:46:07.828: INFO: Pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029031964s - Jul 29 16:46:07.828: INFO: Pod "busybox-readonly-false-665ac280-14a5-473d-af93-0b64154275df" satisfied condition "Succeeded or Failed" - [AfterEach] [sig-node] Security Context + [BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 + [It] should provide container's memory request [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:235 + STEP: Creating a pod to test downward API volume plugin 08/24/23 12:49:32.22 + Aug 24 12:49:32.237: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191" in namespace "projected-5010" to be "Succeeded or Failed" + Aug 24 12:49:32.246: INFO: Pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191": Phase="Pending", Reason="", readiness=false. Elapsed: 8.803028ms + Aug 24 12:49:34.261: INFO: Pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023307253s + Aug 24 12:49:36.253: INFO: Pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015999699s + STEP: Saw pod success 08/24/23 12:49:36.253 + Aug 24 12:49:36.254: INFO: Pod "downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191" satisfied condition "Succeeded or Failed" + Aug 24 12:49:36.260: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191 container client-container: + STEP: delete the pod 08/24/23 12:49:36.269 + Aug 24 12:49:36.296: INFO: Waiting for pod downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191 to disappear + Aug 24 12:49:36.305: INFO: Pod downwardapi-volume-c7d36457-02f2-48c9-af0a-51dbdfd01191 no longer exists + [AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 - Jul 29 16:46:07.829: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Security Context + Aug 24 12:49:36.306: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Security Context + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Security Context + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 - STEP: Destroying namespace "security-context-test-9911" for this suite. 07/29/23 16:46:07.84 + STEP: Destroying namespace "projected-5010" for this suite. 
08/24/23 12:49:36.315 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS +SS ------------------------------ -[sig-node] Variable Expansion - should allow substituting values in a container's args [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:92 -[BeforeEach] [sig-node] Variable Expansion +[sig-network] Service endpoints latency + should not be very high [Conformance] + test/e2e/network/service_latency.go:59 +[BeforeEach] [sig-network] Service endpoints latency set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:46:07.863 -Jul 29 16:46:07.864: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename var-expansion 07/29/23 16:46:07.866 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:46:07.898 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:46:07.903 -[BeforeEach] [sig-node] Variable Expansion +STEP: Creating a kubernetes client 08/24/23 12:49:36.334 +Aug 24 12:49:36.334: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename svc-latency 08/24/23 12:49:36.337 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:36.379 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:36.386 +[BeforeEach] [sig-network] Service endpoints latency test/e2e/framework/metrics/init/init.go:31 -[It] should allow substituting values in a container's args [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:92 -STEP: Creating a pod to test substitution in container's args 07/29/23 16:46:07.908 -Jul 29 16:46:07.938: INFO: Waiting up to 5m0s for pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb" in namespace "var-expansion-5998" to be "Succeeded or Failed" -Jul 29 16:46:07.951: INFO: Pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb": Phase="Pending", Reason="", readiness=false. Elapsed: 12.770583ms -Jul 29 16:46:09.959: INFO: Pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020149564s -Jul 29 16:46:11.965: INFO: Pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02608796s -STEP: Saw pod success 07/29/23 16:46:11.965 -Jul 29 16:46:11.965: INFO: Pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb" satisfied condition "Succeeded or Failed" -Jul 29 16:46:11.971: INFO: Trying to get logs from node wetuj3nuajog-3 pod var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb container dapi-container: -STEP: delete the pod 07/29/23 16:46:11.993 -Jul 29 16:46:12.021: INFO: Waiting for pod var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb to disappear -Jul 29 16:46:12.027: INFO: Pod var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb no longer exists -[AfterEach] [sig-node] Variable Expansion +[It] should not be very high [Conformance] + test/e2e/network/service_latency.go:59 +Aug 24 12:49:36.391: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: creating replication controller svc-latency-rc in namespace svc-latency-2999 08/24/23 12:49:36.393 +I0824 12:49:36.405795 14 runners.go:193] Created replication controller with name: svc-latency-rc, namespace: svc-latency-2999, replica count: 1 +I0824 12:49:37.457760 14 runners.go:193] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0824 12:49:38.458412 14 runners.go:193] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Aug 24 12:49:38.582: INFO: Created: latency-svc-9cb5k +Aug 24 12:49:38.604: INFO: Got endpoints: latency-svc-9cb5k [43.942554ms] +Aug 24 12:49:38.640: INFO: Created: latency-svc-qqqhr +Aug 24 12:49:38.660: INFO: Got endpoints: latency-svc-qqqhr [56.063584ms] +Aug 24 12:49:38.675: INFO: Created: latency-svc-2llmf +Aug 24 12:49:38.690: INFO: Got endpoints: latency-svc-2llmf [84.157523ms] +Aug 24 12:49:38.698: INFO: Created: latency-svc-6lcfp +Aug 24 12:49:38.714: INFO: Created: latency-svc-zgpb7 +Aug 24 12:49:38.721: INFO: Got endpoints: latency-svc-6lcfp [114.472563ms] +Aug 24 12:49:38.733: INFO: Got endpoints: latency-svc-zgpb7 [125.146115ms] +Aug 24 12:49:38.736: INFO: Created: latency-svc-t42np +Aug 24 12:49:38.751: INFO: Created: latency-svc-6blnt +Aug 24 12:49:38.758: INFO: Got endpoints: latency-svc-t42np [151.579914ms] +Aug 24 12:49:38.765: INFO: Got endpoints: latency-svc-6blnt [156.814153ms] +Aug 24 12:49:38.777: INFO: Created: latency-svc-n2k2d +Aug 24 12:49:38.785: INFO: Got endpoints: latency-svc-n2k2d [170.228686ms] +Aug 24 12:49:38.808: INFO: Created: latency-svc-n5m44 +Aug 24 12:49:38.809: INFO: Created: latency-svc-bs9f9 +Aug 24 12:49:38.824: INFO: Got endpoints: latency-svc-bs9f9 [212.587727ms] +Aug 24 12:49:38.825: INFO: Created: latency-svc-w6gvq +Aug 24 12:49:38.830: INFO: Got endpoints: latency-svc-n5m44 [219.626729ms] +Aug 24 12:49:38.848: INFO: Got endpoints: latency-svc-w6gvq [233.126352ms] +Aug 24 12:49:38.857: INFO: Created: latency-svc-2vssk +Aug 24 12:49:38.859: INFO: Got endpoints: latency-svc-2vssk [243.49353ms] +Aug 24 12:49:38.863: INFO: Created: latency-svc-6v2lz +Aug 24 12:49:38.883: INFO: Got endpoints: latency-svc-6v2lz [267.892004ms] +Aug 24 12:49:38.892: INFO: Created: latency-svc-ks8mh +Aug 24 12:49:38.906: INFO: Got endpoints: latency-svc-ks8mh [290.706541ms] +Aug 24 12:49:39.115: INFO: Created: latency-svc-cgwfk +Aug 24 12:49:39.115: INFO: Created: latency-svc-ttl6v +Aug 24 12:49:39.116: INFO: Created: latency-svc-2pq94 +Aug 24 12:49:39.131: INFO: Created: latency-svc-qnbdz +Aug 24 12:49:39.131: INFO: Created: latency-svc-4j5jk +Aug 24 12:49:39.131: INFO: Created: 
latency-svc-4fmm7 +Aug 24 12:49:39.132: INFO: Created: latency-svc-9sgmk +Aug 24 12:49:39.132: INFO: Created: latency-svc-p76hw +Aug 24 12:49:39.135: INFO: Created: latency-svc-bpmlm +Aug 24 12:49:39.137: INFO: Created: latency-svc-xwxfq +Aug 24 12:49:39.137: INFO: Created: latency-svc-n7bpk +Aug 24 12:49:39.138: INFO: Created: latency-svc-zl9tb +Aug 24 12:49:39.140: INFO: Created: latency-svc-jq2hc +Aug 24 12:49:39.140: INFO: Created: latency-svc-tc2df +Aug 24 12:49:39.142: INFO: Created: latency-svc-8ntmw +Aug 24 12:49:39.172: INFO: Got endpoints: latency-svc-2pq94 [555.98563ms] +Aug 24 12:49:39.173: INFO: Got endpoints: latency-svc-4j5jk [267.366666ms] +Aug 24 12:49:39.178: INFO: Got endpoints: latency-svc-ttl6v [456.158586ms] +Aug 24 12:49:39.186: INFO: Got endpoints: latency-svc-cgwfk [420.848329ms] +Aug 24 12:49:39.215: INFO: Got endpoints: latency-svc-p76hw [524.373733ms] +Aug 24 12:49:39.226: INFO: Got endpoints: latency-svc-4fmm7 [395.646699ms] +Aug 24 12:49:39.227: INFO: Got endpoints: latency-svc-9sgmk [403.464416ms] +Aug 24 12:49:39.228: INFO: Got endpoints: latency-svc-qnbdz [369.231289ms] +Aug 24 12:49:39.232: INFO: Got endpoints: latency-svc-xwxfq [473.879001ms] +Aug 24 12:49:39.276: INFO: Created: latency-svc-b7x6g +Aug 24 12:49:39.293: INFO: Got endpoints: latency-svc-n7bpk [444.71012ms] +Aug 24 12:49:39.295: INFO: Got endpoints: latency-svc-zl9tb [634.312931ms] +Aug 24 12:49:39.296: INFO: Got endpoints: latency-svc-8ntmw [562.625256ms] +Aug 24 12:49:39.299: INFO: Got endpoints: latency-svc-bpmlm [513.680755ms] +Aug 24 12:49:39.299: INFO: Got endpoints: latency-svc-jq2hc [683.271252ms] +Aug 24 12:49:39.322: INFO: Got endpoints: latency-svc-tc2df [439.362204ms] +Aug 24 12:49:39.330: INFO: Created: latency-svc-jj5s8 +Aug 24 12:49:39.341: INFO: Got endpoints: latency-svc-b7x6g [168.904394ms] +Aug 24 12:49:39.342: INFO: Got endpoints: latency-svc-jj5s8 [126.951742ms] +Aug 24 12:49:39.363: INFO: Created: latency-svc-lgnq4 +Aug 24 12:49:39.369: INFO: Got endpoints: latency-svc-lgnq4 [143.417106ms] +Aug 24 12:49:39.380: INFO: Created: latency-svc-2nvb4 +Aug 24 12:49:39.404: INFO: Got endpoints: latency-svc-2nvb4 [226.139178ms] +Aug 24 12:49:39.424: INFO: Created: latency-svc-cqdzp +Aug 24 12:49:39.440: INFO: Got endpoints: latency-svc-cqdzp [254.217879ms] +Aug 24 12:49:39.496: INFO: Created: latency-svc-4n8sm +Aug 24 12:49:39.498: INFO: Got endpoints: latency-svc-4n8sm [324.455077ms] +Aug 24 12:49:39.517: INFO: Created: latency-svc-lr4bp +Aug 24 12:49:39.525: INFO: Got endpoints: latency-svc-lr4bp [297.939326ms] +Aug 24 12:49:39.547: INFO: Created: latency-svc-q7cw8 +Aug 24 12:49:39.560: INFO: Got endpoints: latency-svc-q7cw8 [327.216494ms] +Aug 24 12:49:39.574: INFO: Created: latency-svc-jdjkr +Aug 24 12:49:39.587: INFO: Got endpoints: latency-svc-jdjkr [358.332465ms] +Aug 24 12:49:39.615: INFO: Created: latency-svc-6k7sw +Aug 24 12:49:39.630: INFO: Got endpoints: latency-svc-6k7sw [334.957207ms] +Aug 24 12:49:39.638: INFO: Created: latency-svc-6ghm9 +Aug 24 12:49:39.664: INFO: Got endpoints: latency-svc-6ghm9 [364.161732ms] +Aug 24 12:49:39.682: INFO: Created: latency-svc-4pf74 +Aug 24 12:49:39.709: INFO: Created: latency-svc-s5zj4 +Aug 24 12:49:39.713: INFO: Got endpoints: latency-svc-4pf74 [419.678946ms] +Aug 24 12:49:39.722: INFO: Created: latency-svc-qw78k +Aug 24 12:49:39.734: INFO: Got endpoints: latency-svc-s5zj4 [438.756726ms] +Aug 24 12:49:39.755: INFO: Created: latency-svc-nfh8g +Aug 24 12:49:39.759: INFO: Got endpoints: latency-svc-qw78k [459.622892ms] +Aug 24 
12:49:39.766: INFO: Got endpoints: latency-svc-nfh8g [443.892767ms] +Aug 24 12:49:39.786: INFO: Created: latency-svc-6qghf +Aug 24 12:49:39.797: INFO: Created: latency-svc-w7knl +Aug 24 12:49:39.803: INFO: Got endpoints: latency-svc-6qghf [461.754553ms] +Aug 24 12:49:39.818: INFO: Got endpoints: latency-svc-w7knl [476.051439ms] +Aug 24 12:49:39.840: INFO: Created: latency-svc-f9skq +Aug 24 12:49:39.846: INFO: Got endpoints: latency-svc-f9skq [476.162731ms] +Aug 24 12:49:39.994: INFO: Created: latency-svc-cm8pj +Aug 24 12:49:39.995: INFO: Created: latency-svc-kdtct +Aug 24 12:49:40.001: INFO: Created: latency-svc-8zw9x +Aug 24 12:49:40.002: INFO: Created: latency-svc-2bq8t +Aug 24 12:49:40.032: INFO: Created: latency-svc-jh87n +Aug 24 12:49:40.033: INFO: Created: latency-svc-cxjkm +Aug 24 12:49:40.034: INFO: Created: latency-svc-52g2z +Aug 24 12:49:40.034: INFO: Created: latency-svc-zg5fc +Aug 24 12:49:40.034: INFO: Created: latency-svc-n2r5t +Aug 24 12:49:40.035: INFO: Created: latency-svc-b46mh +Aug 24 12:49:40.034: INFO: Created: latency-svc-k7hjk +Aug 24 12:49:40.035: INFO: Created: latency-svc-bdlhc +Aug 24 12:49:40.036: INFO: Created: latency-svc-sbk4j +Aug 24 12:49:40.036: INFO: Created: latency-svc-nbrhv +Aug 24 12:49:40.037: INFO: Created: latency-svc-5s86b +Aug 24 12:49:40.086: INFO: Got endpoints: latency-svc-cm8pj [422.2223ms] +Aug 24 12:49:40.112: INFO: Got endpoints: latency-svc-k7hjk [266.254622ms] +Aug 24 12:49:40.113: INFO: Got endpoints: latency-svc-2bq8t [482.3205ms] +Aug 24 12:49:40.114: INFO: Got endpoints: latency-svc-kdtct [525.576932ms] +Aug 24 12:49:40.114: INFO: Got endpoints: latency-svc-8zw9x [673.462216ms] +Aug 24 12:49:40.135: INFO: Got endpoints: latency-svc-cxjkm [730.239621ms] +Aug 24 12:49:40.163: INFO: Got endpoints: latency-svc-jh87n [449.721282ms] +Aug 24 12:49:40.171: INFO: Created: latency-svc-vkkhs +Aug 24 12:49:40.203: INFO: Got endpoints: latency-svc-sbk4j [399.618346ms] +Aug 24 12:49:40.205: INFO: Got endpoints: latency-svc-5s86b [470.635399ms] +Aug 24 12:49:40.207: INFO: Got endpoints: latency-svc-n2r5t [709.105444ms] +Aug 24 12:49:40.209: INFO: Got endpoints: latency-svc-52g2z [442.562917ms] +Aug 24 12:49:40.215: INFO: Created: latency-svc-g56hc +Aug 24 12:49:40.221: INFO: Got endpoints: latency-svc-zg5fc [695.078929ms] +Aug 24 12:49:40.221: INFO: Got endpoints: latency-svc-b46mh [462.625648ms] +Aug 24 12:49:40.237: INFO: Created: latency-svc-qzqhl +Aug 24 12:49:40.240: INFO: Got endpoints: latency-svc-nbrhv [421.873402ms] +Aug 24 12:49:40.240: INFO: Got endpoints: latency-svc-bdlhc [680.53894ms] +Aug 24 12:49:40.249: INFO: Created: latency-svc-xdsdp +Aug 24 12:49:40.251: INFO: Got endpoints: latency-svc-vkkhs [165.530931ms] +Aug 24 12:49:40.261: INFO: Created: latency-svc-6p87w +Aug 24 12:49:40.271: INFO: Created: latency-svc-7d86s +Aug 24 12:49:40.296: INFO: Got endpoints: latency-svc-g56hc [183.093192ms] +Aug 24 12:49:40.346: INFO: Got endpoints: latency-svc-qzqhl [232.327456ms] +Aug 24 12:49:40.396: INFO: Got endpoints: latency-svc-xdsdp [283.06359ms] +Aug 24 12:49:40.430: INFO: Created: latency-svc-b9zg7 +Aug 24 12:49:40.430: INFO: Created: latency-svc-mn5zq +Aug 24 12:49:40.434: INFO: Created: latency-svc-cqd2t +Aug 24 12:49:40.455: INFO: Got endpoints: latency-svc-6p87w [341.10324ms] +Aug 24 12:49:40.466: INFO: Created: latency-svc-znb8r +Aug 24 12:49:40.479: INFO: Created: latency-svc-ksvf5 +Aug 24 12:49:40.479: INFO: Created: latency-svc-xkwcp +Aug 24 12:49:40.480: INFO: Created: latency-svc-zlnjs +Aug 24 12:49:40.480: INFO: Created: 
latency-svc-b8skg +Aug 24 12:49:40.481: INFO: Created: latency-svc-bfl6x +Aug 24 12:49:40.481: INFO: Created: latency-svc-9kc8c +Aug 24 12:49:40.482: INFO: Created: latency-svc-rzzr5 +Aug 24 12:49:40.491: INFO: Created: latency-svc-kvpxf +Aug 24 12:49:40.492: INFO: Created: latency-svc-xvbp6 +Aug 24 12:49:40.504: INFO: Got endpoints: latency-svc-7d86s [368.862262ms] +Aug 24 12:49:40.516: INFO: Created: latency-svc-56g2n +Aug 24 12:49:40.532: INFO: Created: latency-svc-jmp46 +Aug 24 12:49:40.548: INFO: Got endpoints: latency-svc-mn5zq [296.066397ms] +Aug 24 12:49:40.568: INFO: Created: latency-svc-rbb2n +Aug 24 12:49:40.595: INFO: Got endpoints: latency-svc-b9zg7 [431.110147ms] +Aug 24 12:49:40.613: INFO: Created: latency-svc-d2dsr +Aug 24 12:49:40.653: INFO: Got endpoints: latency-svc-cqd2t [450.566273ms] +Aug 24 12:49:40.677: INFO: Created: latency-svc-kp989 +Aug 24 12:49:40.703: INFO: Got endpoints: latency-svc-znb8r [406.392154ms] +Aug 24 12:49:40.733: INFO: Created: latency-svc-czrlm +Aug 24 12:49:40.752: INFO: Got endpoints: latency-svc-xvbp6 [530.214258ms] +Aug 24 12:49:40.779: INFO: Created: latency-svc-nttk4 +Aug 24 12:49:40.799: INFO: Got endpoints: latency-svc-kvpxf [589.786908ms] +Aug 24 12:49:40.821: INFO: Created: latency-svc-8rfmr +Aug 24 12:49:40.847: INFO: Got endpoints: latency-svc-zlnjs [451.511288ms] +Aug 24 12:49:40.875: INFO: Created: latency-svc-xsbls +Aug 24 12:49:40.896: INFO: Got endpoints: latency-svc-xkwcp [675.331847ms] +Aug 24 12:49:40.914: INFO: Created: latency-svc-54gxm +Aug 24 12:49:40.950: INFO: Got endpoints: latency-svc-rzzr5 [709.640934ms] +Aug 24 12:49:40.972: INFO: Created: latency-svc-4hdfk +Aug 24 12:49:40.994: INFO: Got endpoints: latency-svc-9kc8c [754.236635ms] +Aug 24 12:49:41.013: INFO: Created: latency-svc-sbt88 +Aug 24 12:49:41.052: INFO: Got endpoints: latency-svc-ksvf5 [844.367675ms] +Aug 24 12:49:41.082: INFO: Created: latency-svc-f42s8 +Aug 24 12:49:41.096: INFO: Got endpoints: latency-svc-b8skg [749.689492ms] +Aug 24 12:49:41.130: INFO: Created: latency-svc-fdpsf +Aug 24 12:49:41.145: INFO: Got endpoints: latency-svc-bfl6x [939.574799ms] +Aug 24 12:49:41.180: INFO: Created: latency-svc-khbt8 +Aug 24 12:49:41.195: INFO: Got endpoints: latency-svc-56g2n [739.944485ms] +Aug 24 12:49:41.225: INFO: Created: latency-svc-msjpv +Aug 24 12:49:41.247: INFO: Got endpoints: latency-svc-jmp46 [742.85621ms] +Aug 24 12:49:41.263: INFO: Created: latency-svc-rvlcq +Aug 24 12:49:41.294: INFO: Got endpoints: latency-svc-rbb2n [745.832041ms] +Aug 24 12:49:41.320: INFO: Created: latency-svc-zc8vx +Aug 24 12:49:41.353: INFO: Got endpoints: latency-svc-d2dsr [758.19094ms] +Aug 24 12:49:41.385: INFO: Created: latency-svc-n4s86 +Aug 24 12:49:41.392: INFO: Got endpoints: latency-svc-kp989 [738.286619ms] +Aug 24 12:49:41.429: INFO: Created: latency-svc-dktvt +Aug 24 12:49:41.446: INFO: Got endpoints: latency-svc-czrlm [742.966587ms] +Aug 24 12:49:41.473: INFO: Created: latency-svc-9dlw2 +Aug 24 12:49:41.497: INFO: Got endpoints: latency-svc-nttk4 [744.152701ms] +Aug 24 12:49:41.522: INFO: Created: latency-svc-gp4pk +Aug 24 12:49:41.542: INFO: Got endpoints: latency-svc-8rfmr [743.040996ms] +Aug 24 12:49:41.560: INFO: Created: latency-svc-wwljq +Aug 24 12:49:41.595: INFO: Got endpoints: latency-svc-xsbls [747.613239ms] +Aug 24 12:49:41.621: INFO: Created: latency-svc-56wvd +Aug 24 12:49:41.653: INFO: Got endpoints: latency-svc-54gxm [756.857646ms] +Aug 24 12:49:41.674: INFO: Created: latency-svc-6hqlh +Aug 24 12:49:41.702: INFO: Got endpoints: latency-svc-4hdfk 
[751.972848ms] +Aug 24 12:49:41.724: INFO: Created: latency-svc-4pmxr +Aug 24 12:49:41.746: INFO: Got endpoints: latency-svc-sbt88 [752.031469ms] +Aug 24 12:49:41.783: INFO: Created: latency-svc-9cgvj +Aug 24 12:49:41.797: INFO: Got endpoints: latency-svc-f42s8 [745.202476ms] +Aug 24 12:49:41.814: INFO: Created: latency-svc-hprcf +Aug 24 12:49:41.848: INFO: Got endpoints: latency-svc-fdpsf [751.342216ms] +Aug 24 12:49:41.874: INFO: Created: latency-svc-h59vd +Aug 24 12:49:41.900: INFO: Got endpoints: latency-svc-khbt8 [754.894792ms] +Aug 24 12:49:41.928: INFO: Created: latency-svc-fxzp4 +Aug 24 12:49:41.949: INFO: Got endpoints: latency-svc-msjpv [753.692722ms] +Aug 24 12:49:41.972: INFO: Created: latency-svc-l78qs +Aug 24 12:49:41.992: INFO: Got endpoints: latency-svc-rvlcq [745.388433ms] +Aug 24 12:49:42.015: INFO: Created: latency-svc-jdf4t +Aug 24 12:49:42.057: INFO: Got endpoints: latency-svc-zc8vx [762.980733ms] +Aug 24 12:49:42.109: INFO: Got endpoints: latency-svc-n4s86 [755.333375ms] +Aug 24 12:49:42.116: INFO: Created: latency-svc-m99q8 +Aug 24 12:49:42.132: INFO: Created: latency-svc-v2gk8 +Aug 24 12:49:42.148: INFO: Got endpoints: latency-svc-dktvt [756.328611ms] +Aug 24 12:49:42.209: INFO: Got endpoints: latency-svc-9dlw2 [762.193337ms] +Aug 24 12:49:42.218: INFO: Created: latency-svc-txcpg +Aug 24 12:49:42.245: INFO: Created: latency-svc-xk89s +Aug 24 12:49:42.257: INFO: Got endpoints: latency-svc-gp4pk [759.561923ms] +Aug 24 12:49:42.276: INFO: Created: latency-svc-pxlkb +Aug 24 12:49:42.299: INFO: Got endpoints: latency-svc-wwljq [756.892822ms] +Aug 24 12:49:42.318: INFO: Created: latency-svc-xlql7 +Aug 24 12:49:42.350: INFO: Got endpoints: latency-svc-56wvd [755.004249ms] +Aug 24 12:49:42.376: INFO: Created: latency-svc-7br9g +Aug 24 12:49:42.400: INFO: Got endpoints: latency-svc-6hqlh [746.248804ms] +Aug 24 12:49:42.417: INFO: Created: latency-svc-2h4m5 +Aug 24 12:49:42.442: INFO: Got endpoints: latency-svc-4pmxr [740.170969ms] +Aug 24 12:49:42.461: INFO: Created: latency-svc-2frd7 +Aug 24 12:49:42.499: INFO: Got endpoints: latency-svc-9cgvj [752.447734ms] +Aug 24 12:49:42.530: INFO: Created: latency-svc-xz6sh +Aug 24 12:49:42.550: INFO: Got endpoints: latency-svc-hprcf [753.024943ms] +Aug 24 12:49:42.569: INFO: Created: latency-svc-5q4vg +Aug 24 12:49:42.599: INFO: Got endpoints: latency-svc-h59vd [750.879335ms] +Aug 24 12:49:42.622: INFO: Created: latency-svc-hr77d +Aug 24 12:49:42.646: INFO: Got endpoints: latency-svc-fxzp4 [745.462525ms] +Aug 24 12:49:42.707: INFO: Got endpoints: latency-svc-l78qs [756.057255ms] +Aug 24 12:49:42.718: INFO: Created: latency-svc-n7zd5 +Aug 24 12:49:42.746: INFO: Created: latency-svc-z2gfh +Aug 24 12:49:42.751: INFO: Got endpoints: latency-svc-jdf4t [758.676096ms] +Aug 24 12:49:42.774: INFO: Created: latency-svc-9xmn9 +Aug 24 12:49:42.807: INFO: Got endpoints: latency-svc-m99q8 [749.83088ms] +Aug 24 12:49:42.852: INFO: Created: latency-svc-sgtkq +Aug 24 12:49:42.865: INFO: Got endpoints: latency-svc-v2gk8 [756.461061ms] +Aug 24 12:49:42.889: INFO: Created: latency-svc-6khsb +Aug 24 12:49:42.898: INFO: Got endpoints: latency-svc-txcpg [749.576614ms] +Aug 24 12:49:42.923: INFO: Created: latency-svc-vcwz5 +Aug 24 12:49:42.950: INFO: Got endpoints: latency-svc-xk89s [741.281238ms] +Aug 24 12:49:42.973: INFO: Created: latency-svc-gbrqx +Aug 24 12:49:42.994: INFO: Got endpoints: latency-svc-pxlkb [736.268587ms] +Aug 24 12:49:43.020: INFO: Created: latency-svc-jc6k7 +Aug 24 12:49:43.055: INFO: Got endpoints: latency-svc-xlql7 [754.968091ms] 
+Aug 24 12:49:43.078: INFO: Created: latency-svc-nql7r +Aug 24 12:49:43.103: INFO: Got endpoints: latency-svc-7br9g [752.897191ms] +Aug 24 12:49:43.126: INFO: Created: latency-svc-kcmpw +Aug 24 12:49:43.152: INFO: Got endpoints: latency-svc-2h4m5 [752.467021ms] +Aug 24 12:49:43.170: INFO: Created: latency-svc-xzbw4 +Aug 24 12:49:43.197: INFO: Got endpoints: latency-svc-2frd7 [754.149943ms] +Aug 24 12:49:43.220: INFO: Created: latency-svc-4r46w +Aug 24 12:49:43.247: INFO: Got endpoints: latency-svc-xz6sh [747.252052ms] +Aug 24 12:49:43.270: INFO: Created: latency-svc-hv7xk +Aug 24 12:49:43.297: INFO: Got endpoints: latency-svc-5q4vg [746.171399ms] +Aug 24 12:49:43.316: INFO: Created: latency-svc-q7q5c +Aug 24 12:49:43.351: INFO: Got endpoints: latency-svc-hr77d [750.983062ms] +Aug 24 12:49:43.388: INFO: Created: latency-svc-tqshm +Aug 24 12:49:43.433: INFO: Got endpoints: latency-svc-n7zd5 [786.630555ms] +Aug 24 12:49:43.447: INFO: Got endpoints: latency-svc-z2gfh [739.644572ms] +Aug 24 12:49:43.474: INFO: Created: latency-svc-kg7nq +Aug 24 12:49:43.486: INFO: Created: latency-svc-7qtxw +Aug 24 12:49:43.498: INFO: Got endpoints: latency-svc-9xmn9 [746.96027ms] +Aug 24 12:49:43.526: INFO: Created: latency-svc-w7czn +Aug 24 12:49:43.543: INFO: Got endpoints: latency-svc-sgtkq [735.955632ms] +Aug 24 12:49:43.565: INFO: Created: latency-svc-h68hb +Aug 24 12:49:43.605: INFO: Got endpoints: latency-svc-6khsb [739.828753ms] +Aug 24 12:49:43.628: INFO: Created: latency-svc-ggtpz +Aug 24 12:49:43.650: INFO: Got endpoints: latency-svc-vcwz5 [751.903141ms] +Aug 24 12:49:43.678: INFO: Created: latency-svc-rjg8m +Aug 24 12:49:43.704: INFO: Got endpoints: latency-svc-gbrqx [753.434976ms] +Aug 24 12:49:43.729: INFO: Created: latency-svc-5x2gz +Aug 24 12:49:43.753: INFO: Got endpoints: latency-svc-jc6k7 [759.513432ms] +Aug 24 12:49:43.789: INFO: Created: latency-svc-dsfcp +Aug 24 12:49:43.805: INFO: Got endpoints: latency-svc-nql7r [749.79641ms] +Aug 24 12:49:43.834: INFO: Created: latency-svc-929rk +Aug 24 12:49:43.845: INFO: Got endpoints: latency-svc-kcmpw [741.353014ms] +Aug 24 12:49:43.876: INFO: Created: latency-svc-2q628 +Aug 24 12:49:43.897: INFO: Got endpoints: latency-svc-xzbw4 [744.76965ms] +Aug 24 12:49:43.925: INFO: Created: latency-svc-mh9cq +Aug 24 12:49:43.947: INFO: Got endpoints: latency-svc-4r46w [750.304479ms] +Aug 24 12:49:43.972: INFO: Created: latency-svc-g7grs +Aug 24 12:49:44.003: INFO: Got endpoints: latency-svc-hv7xk [755.463143ms] +Aug 24 12:49:44.034: INFO: Created: latency-svc-lflqk +Aug 24 12:49:44.058: INFO: Got endpoints: latency-svc-q7q5c [761.426379ms] +Aug 24 12:49:44.093: INFO: Created: latency-svc-jdwxq +Aug 24 12:49:44.097: INFO: Got endpoints: latency-svc-tqshm [745.363757ms] +Aug 24 12:49:44.124: INFO: Created: latency-svc-hszqz +Aug 24 12:49:44.158: INFO: Got endpoints: latency-svc-kg7nq [725.583608ms] +Aug 24 12:49:44.180: INFO: Created: latency-svc-drq8k +Aug 24 12:49:44.194: INFO: Got endpoints: latency-svc-7qtxw [747.369181ms] +Aug 24 12:49:44.240: INFO: Created: latency-svc-2892g +Aug 24 12:49:44.254: INFO: Got endpoints: latency-svc-w7czn [755.818828ms] +Aug 24 12:49:44.272: INFO: Created: latency-svc-qxjhd +Aug 24 12:49:44.296: INFO: Got endpoints: latency-svc-h68hb [752.325984ms] +Aug 24 12:49:44.315: INFO: Created: latency-svc-nd78f +Aug 24 12:49:44.359: INFO: Got endpoints: latency-svc-ggtpz [753.546815ms] +Aug 24 12:49:44.379: INFO: Created: latency-svc-66rlh +Aug 24 12:49:44.405: INFO: Got endpoints: latency-svc-rjg8m [754.524584ms] +Aug 24 
12:49:44.438: INFO: Created: latency-svc-97xcb +Aug 24 12:49:44.449: INFO: Got endpoints: latency-svc-5x2gz [745.244082ms] +Aug 24 12:49:44.477: INFO: Created: latency-svc-6pwhl +Aug 24 12:49:44.502: INFO: Got endpoints: latency-svc-dsfcp [747.803982ms] +Aug 24 12:49:44.529: INFO: Created: latency-svc-m2lrw +Aug 24 12:49:44.550: INFO: Got endpoints: latency-svc-929rk [744.596411ms] +Aug 24 12:49:44.571: INFO: Created: latency-svc-c925k +Aug 24 12:49:44.600: INFO: Got endpoints: latency-svc-2q628 [754.73698ms] +Aug 24 12:49:44.623: INFO: Created: latency-svc-jd24p +Aug 24 12:49:44.648: INFO: Got endpoints: latency-svc-mh9cq [751.037493ms] +Aug 24 12:49:44.670: INFO: Created: latency-svc-wkm9h +Aug 24 12:49:44.695: INFO: Got endpoints: latency-svc-g7grs [748.161432ms] +Aug 24 12:49:44.728: INFO: Created: latency-svc-f48jq +Aug 24 12:49:44.749: INFO: Got endpoints: latency-svc-lflqk [746.388308ms] +Aug 24 12:49:44.770: INFO: Created: latency-svc-hs8z4 +Aug 24 12:49:44.796: INFO: Got endpoints: latency-svc-jdwxq [737.443036ms] +Aug 24 12:49:44.818: INFO: Created: latency-svc-bt6x2 +Aug 24 12:49:44.850: INFO: Got endpoints: latency-svc-hszqz [752.852564ms] +Aug 24 12:49:44.887: INFO: Created: latency-svc-dq2jh +Aug 24 12:49:44.896: INFO: Got endpoints: latency-svc-drq8k [736.948578ms] +Aug 24 12:49:44.913: INFO: Created: latency-svc-xtsbz +Aug 24 12:49:44.950: INFO: Got endpoints: latency-svc-2892g [755.445596ms] +Aug 24 12:49:44.978: INFO: Created: latency-svc-qk2n7 +Aug 24 12:49:44.995: INFO: Got endpoints: latency-svc-qxjhd [740.452097ms] +Aug 24 12:49:45.019: INFO: Created: latency-svc-j87hp +Aug 24 12:49:45.048: INFO: Got endpoints: latency-svc-nd78f [752.085646ms] +Aug 24 12:49:45.067: INFO: Created: latency-svc-2b8rl +Aug 24 12:49:45.099: INFO: Got endpoints: latency-svc-66rlh [739.046774ms] +Aug 24 12:49:45.158: INFO: Got endpoints: latency-svc-97xcb [752.557387ms] +Aug 24 12:49:45.171: INFO: Created: latency-svc-zct75 +Aug 24 12:49:45.191: INFO: Created: latency-svc-wwblm +Aug 24 12:49:45.199: INFO: Got endpoints: latency-svc-6pwhl [749.246766ms] +Aug 24 12:49:45.226: INFO: Created: latency-svc-fct7b +Aug 24 12:49:45.245: INFO: Got endpoints: latency-svc-m2lrw [742.048931ms] +Aug 24 12:49:45.267: INFO: Created: latency-svc-498gp +Aug 24 12:49:45.311: INFO: Got endpoints: latency-svc-c925k [761.795133ms] +Aug 24 12:49:45.332: INFO: Created: latency-svc-sgh82 +Aug 24 12:49:45.350: INFO: Got endpoints: latency-svc-jd24p [750.068854ms] +Aug 24 12:49:45.373: INFO: Created: latency-svc-xxd99 +Aug 24 12:49:45.398: INFO: Got endpoints: latency-svc-wkm9h [749.147848ms] +Aug 24 12:49:45.430: INFO: Created: latency-svc-ldrxd +Aug 24 12:49:45.447: INFO: Got endpoints: latency-svc-f48jq [751.105578ms] +Aug 24 12:49:45.473: INFO: Created: latency-svc-xgjx9 +Aug 24 12:49:45.501: INFO: Got endpoints: latency-svc-hs8z4 [751.601103ms] +Aug 24 12:49:45.524: INFO: Created: latency-svc-hp4d6 +Aug 24 12:49:45.549: INFO: Got endpoints: latency-svc-bt6x2 [753.324886ms] +Aug 24 12:49:45.570: INFO: Created: latency-svc-jftmq +Aug 24 12:49:45.594: INFO: Got endpoints: latency-svc-dq2jh [743.432288ms] +Aug 24 12:49:45.619: INFO: Created: latency-svc-smvpq +Aug 24 12:49:45.647: INFO: Got endpoints: latency-svc-xtsbz [750.772838ms] +Aug 24 12:49:45.671: INFO: Created: latency-svc-2wcwp +Aug 24 12:49:45.718: INFO: Got endpoints: latency-svc-qk2n7 [767.378734ms] +Aug 24 12:49:45.739: INFO: Created: latency-svc-2ch44 +Aug 24 12:49:45.754: INFO: Got endpoints: latency-svc-j87hp [758.924222ms] +Aug 24 12:49:45.787: 
INFO: Created: latency-svc-c7z45 +Aug 24 12:49:45.813: INFO: Got endpoints: latency-svc-2b8rl [763.926802ms] +Aug 24 12:49:45.848: INFO: Created: latency-svc-x2x6f +Aug 24 12:49:45.855: INFO: Got endpoints: latency-svc-zct75 [756.182524ms] +Aug 24 12:49:45.913: INFO: Created: latency-svc-h28gh +Aug 24 12:49:45.914: INFO: Got endpoints: latency-svc-wwblm [756.183161ms] +Aug 24 12:49:45.941: INFO: Created: latency-svc-vq79v +Aug 24 12:49:45.985: INFO: Got endpoints: latency-svc-fct7b [785.796909ms] +Aug 24 12:49:46.004: INFO: Got endpoints: latency-svc-498gp [758.129698ms] +Aug 24 12:49:46.015: INFO: Created: latency-svc-t252r +Aug 24 12:49:46.034: INFO: Created: latency-svc-j4xgp +Aug 24 12:49:46.050: INFO: Got endpoints: latency-svc-sgh82 [738.182833ms] +Aug 24 12:49:46.083: INFO: Created: latency-svc-g7mhd +Aug 24 12:49:46.105: INFO: Got endpoints: latency-svc-xxd99 [754.273066ms] +Aug 24 12:49:46.129: INFO: Created: latency-svc-vrvw8 +Aug 24 12:49:46.161: INFO: Got endpoints: latency-svc-ldrxd [762.750453ms] +Aug 24 12:49:46.186: INFO: Created: latency-svc-26qlb +Aug 24 12:49:46.195: INFO: Got endpoints: latency-svc-xgjx9 [748.424692ms] +Aug 24 12:49:46.225: INFO: Created: latency-svc-lrd4s +Aug 24 12:49:46.248: INFO: Got endpoints: latency-svc-hp4d6 [746.357612ms] +Aug 24 12:49:46.283: INFO: Created: latency-svc-wvrqs +Aug 24 12:49:46.296: INFO: Got endpoints: latency-svc-jftmq [746.142966ms] +Aug 24 12:49:46.315: INFO: Created: latency-svc-c6rhv +Aug 24 12:49:46.350: INFO: Got endpoints: latency-svc-smvpq [756.73964ms] +Aug 24 12:49:46.373: INFO: Created: latency-svc-mgpjc +Aug 24 12:49:46.397: INFO: Got endpoints: latency-svc-2wcwp [749.739289ms] +Aug 24 12:49:46.413: INFO: Created: latency-svc-8mbbv +Aug 24 12:49:46.447: INFO: Got endpoints: latency-svc-2ch44 [729.113656ms] +Aug 24 12:49:46.493: INFO: Got endpoints: latency-svc-c7z45 [739.214166ms] +Aug 24 12:49:46.549: INFO: Got endpoints: latency-svc-x2x6f [736.601131ms] +Aug 24 12:49:46.596: INFO: Got endpoints: latency-svc-h28gh [736.094386ms] +Aug 24 12:49:46.648: INFO: Got endpoints: latency-svc-vq79v [733.823164ms] +Aug 24 12:49:46.699: INFO: Got endpoints: latency-svc-t252r [713.915127ms] +Aug 24 12:49:46.745: INFO: Got endpoints: latency-svc-j4xgp [740.854057ms] +Aug 24 12:49:46.820: INFO: Got endpoints: latency-svc-g7mhd [769.818609ms] +Aug 24 12:49:46.851: INFO: Got endpoints: latency-svc-vrvw8 [746.065416ms] +Aug 24 12:49:46.903: INFO: Got endpoints: latency-svc-26qlb [742.370763ms] +Aug 24 12:49:46.956: INFO: Got endpoints: latency-svc-lrd4s [760.059827ms] +Aug 24 12:49:47.003: INFO: Got endpoints: latency-svc-wvrqs [754.527854ms] +Aug 24 12:49:47.058: INFO: Got endpoints: latency-svc-c6rhv [762.441084ms] +Aug 24 12:49:47.099: INFO: Got endpoints: latency-svc-mgpjc [748.122958ms] +Aug 24 12:49:47.156: INFO: Got endpoints: latency-svc-8mbbv [758.732316ms] +Aug 24 12:49:47.156: INFO: Latencies: [56.063584ms 84.157523ms 114.472563ms 125.146115ms 126.951742ms 143.417106ms 151.579914ms 156.814153ms 165.530931ms 168.904394ms 170.228686ms 183.093192ms 212.587727ms 219.626729ms 226.139178ms 232.327456ms 233.126352ms 243.49353ms 254.217879ms 266.254622ms 267.366666ms 267.892004ms 283.06359ms 290.706541ms 296.066397ms 297.939326ms 324.455077ms 327.216494ms 334.957207ms 341.10324ms 358.332465ms 364.161732ms 368.862262ms 369.231289ms 395.646699ms 399.618346ms 403.464416ms 406.392154ms 419.678946ms 420.848329ms 421.873402ms 422.2223ms 431.110147ms 438.756726ms 439.362204ms 442.562917ms 443.892767ms 444.71012ms 449.721282ms 
450.566273ms 451.511288ms 456.158586ms 459.622892ms 461.754553ms 462.625648ms 470.635399ms 473.879001ms 476.051439ms 476.162731ms 482.3205ms 513.680755ms 524.373733ms 525.576932ms 530.214258ms 555.98563ms 562.625256ms 589.786908ms 634.312931ms 673.462216ms 675.331847ms 680.53894ms 683.271252ms 695.078929ms 709.105444ms 709.640934ms 713.915127ms 725.583608ms 729.113656ms 730.239621ms 733.823164ms 735.955632ms 736.094386ms 736.268587ms 736.601131ms 736.948578ms 737.443036ms 738.182833ms 738.286619ms 739.046774ms 739.214166ms 739.644572ms 739.828753ms 739.944485ms 740.170969ms 740.452097ms 740.854057ms 741.281238ms 741.353014ms 742.048931ms 742.370763ms 742.85621ms 742.966587ms 743.040996ms 743.432288ms 744.152701ms 744.596411ms 744.76965ms 745.202476ms 745.244082ms 745.363757ms 745.388433ms 745.462525ms 745.832041ms 746.065416ms 746.142966ms 746.171399ms 746.248804ms 746.357612ms 746.388308ms 746.96027ms 747.252052ms 747.369181ms 747.613239ms 747.803982ms 748.122958ms 748.161432ms 748.424692ms 749.147848ms 749.246766ms 749.576614ms 749.689492ms 749.739289ms 749.79641ms 749.83088ms 750.068854ms 750.304479ms 750.772838ms 750.879335ms 750.983062ms 751.037493ms 751.105578ms 751.342216ms 751.601103ms 751.903141ms 751.972848ms 752.031469ms 752.085646ms 752.325984ms 752.447734ms 752.467021ms 752.557387ms 752.852564ms 752.897191ms 753.024943ms 753.324886ms 753.434976ms 753.546815ms 753.692722ms 754.149943ms 754.236635ms 754.273066ms 754.524584ms 754.527854ms 754.73698ms 754.894792ms 754.968091ms 755.004249ms 755.333375ms 755.445596ms 755.463143ms 755.818828ms 756.057255ms 756.182524ms 756.183161ms 756.328611ms 756.461061ms 756.73964ms 756.857646ms 756.892822ms 758.129698ms 758.19094ms 758.676096ms 758.732316ms 758.924222ms 759.513432ms 759.561923ms 760.059827ms 761.426379ms 761.795133ms 762.193337ms 762.441084ms 762.750453ms 762.980733ms 763.926802ms 767.378734ms 769.818609ms 785.796909ms 786.630555ms 844.367675ms 939.574799ms] +Aug 24 12:49:47.157: INFO: 50 %ile: 742.85621ms +Aug 24 12:49:47.157: INFO: 90 %ile: 758.19094ms +Aug 24 12:49:47.157: INFO: 99 %ile: 844.367675ms +Aug 24 12:49:47.157: INFO: Total sample count: 200 +[AfterEach] [sig-network] Service endpoints latency test/e2e/framework/node/init/init.go:32 -Jul 29 16:46:12.027: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Variable Expansion +Aug 24 12:49:47.157: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Service endpoints latency test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-network] Service endpoints latency dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Variable Expansion - tear down framework | framework.go:193 -STEP: Destroying namespace "var-expansion-5998" for this suite. 07/29/23 16:46:12.038 ------------------------------- -• [4.190 seconds] -[sig-node] Variable Expansion -test/e2e/common/node/framework.go:23 - should allow substituting values in a container's args [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:92 +[DeferCleanup (Each)] [sig-network] Service endpoints latency + tear down framework | framework.go:193 +STEP: Destroying namespace "svc-latency-2999" for this suite. 
08/24/23 12:49:47.172 +------------------------------ +• [SLOW TEST] [10.853 seconds] +[sig-network] Service endpoints latency +test/e2e/network/common/framework.go:23 + should not be very high [Conformance] + test/e2e/network/service_latency.go:59 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Variable Expansion + [BeforeEach] [sig-network] Service endpoints latency set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:46:07.863 - Jul 29 16:46:07.864: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename var-expansion 07/29/23 16:46:07.866 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:46:07.898 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:46:07.903 - [BeforeEach] [sig-node] Variable Expansion + STEP: Creating a kubernetes client 08/24/23 12:49:36.334 + Aug 24 12:49:36.334: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename svc-latency 08/24/23 12:49:36.337 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:36.379 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:36.386 + [BeforeEach] [sig-network] Service endpoints latency test/e2e/framework/metrics/init/init.go:31 - [It] should allow substituting values in a container's args [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:92 - STEP: Creating a pod to test substitution in container's args 07/29/23 16:46:07.908 - Jul 29 16:46:07.938: INFO: Waiting up to 5m0s for pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb" in namespace "var-expansion-5998" to be "Succeeded or Failed" - Jul 29 16:46:07.951: INFO: Pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb": Phase="Pending", Reason="", readiness=false. Elapsed: 12.770583ms - Jul 29 16:46:09.959: INFO: Pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020149564s - Jul 29 16:46:11.965: INFO: Pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02608796s - STEP: Saw pod success 07/29/23 16:46:11.965 - Jul 29 16:46:11.965: INFO: Pod "var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb" satisfied condition "Succeeded or Failed" - Jul 29 16:46:11.971: INFO: Trying to get logs from node wetuj3nuajog-3 pod var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb container dapi-container: - STEP: delete the pod 07/29/23 16:46:11.993 - Jul 29 16:46:12.021: INFO: Waiting for pod var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb to disappear - Jul 29 16:46:12.027: INFO: Pod var-expansion-72276574-5efc-41ac-a7ce-bd11900ccddb no longer exists - [AfterEach] [sig-node] Variable Expansion + [It] should not be very high [Conformance] + test/e2e/network/service_latency.go:59 + Aug 24 12:49:36.391: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: creating replication controller svc-latency-rc in namespace svc-latency-2999 08/24/23 12:49:36.393 + I0824 12:49:36.405795 14 runners.go:193] Created replication controller with name: svc-latency-rc, namespace: svc-latency-2999, replica count: 1 + I0824 12:49:37.457760 14 runners.go:193] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + I0824 12:49:38.458412 14 runners.go:193] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + Aug 24 12:49:38.582: INFO: Created: latency-svc-9cb5k + Aug 24 12:49:38.604: INFO: Got endpoints: latency-svc-9cb5k [43.942554ms] + Aug 24 12:49:38.640: INFO: Created: latency-svc-qqqhr + Aug 24 12:49:38.660: INFO: Got endpoints: latency-svc-qqqhr [56.063584ms] + Aug 24 12:49:38.675: INFO: Created: latency-svc-2llmf + Aug 24 12:49:38.690: INFO: Got endpoints: latency-svc-2llmf [84.157523ms] + Aug 24 12:49:38.698: INFO: Created: latency-svc-6lcfp + Aug 24 12:49:38.714: INFO: Created: latency-svc-zgpb7 + Aug 24 12:49:38.721: INFO: Got endpoints: latency-svc-6lcfp [114.472563ms] + Aug 24 12:49:38.733: INFO: Got endpoints: latency-svc-zgpb7 [125.146115ms] + Aug 24 12:49:38.736: INFO: Created: latency-svc-t42np + Aug 24 12:49:38.751: INFO: Created: latency-svc-6blnt + Aug 24 12:49:38.758: INFO: Got endpoints: latency-svc-t42np [151.579914ms] + Aug 24 12:49:38.765: INFO: Got endpoints: latency-svc-6blnt [156.814153ms] + Aug 24 12:49:38.777: INFO: Created: latency-svc-n2k2d + Aug 24 12:49:38.785: INFO: Got endpoints: latency-svc-n2k2d [170.228686ms] + Aug 24 12:49:38.808: INFO: Created: latency-svc-n5m44 + Aug 24 12:49:38.809: INFO: Created: latency-svc-bs9f9 + Aug 24 12:49:38.824: INFO: Got endpoints: latency-svc-bs9f9 [212.587727ms] + Aug 24 12:49:38.825: INFO: Created: latency-svc-w6gvq + Aug 24 12:49:38.830: INFO: Got endpoints: latency-svc-n5m44 [219.626729ms] + Aug 24 12:49:38.848: INFO: Got endpoints: latency-svc-w6gvq [233.126352ms] + Aug 24 12:49:38.857: INFO: Created: latency-svc-2vssk + Aug 24 12:49:38.859: INFO: Got endpoints: latency-svc-2vssk [243.49353ms] + Aug 24 12:49:38.863: INFO: Created: latency-svc-6v2lz + Aug 24 12:49:38.883: INFO: Got endpoints: latency-svc-6v2lz [267.892004ms] + Aug 24 12:49:38.892: INFO: Created: latency-svc-ks8mh + Aug 24 12:49:38.906: INFO: Got endpoints: latency-svc-ks8mh [290.706541ms] + Aug 24 12:49:39.115: INFO: Created: latency-svc-cgwfk + Aug 24 12:49:39.115: INFO: Created: latency-svc-ttl6v + Aug 24 12:49:39.116: INFO: Created: latency-svc-2pq94 + Aug 24 12:49:39.131: INFO: Created: latency-svc-qnbdz + Aug 24 12:49:39.131: INFO: Created: latency-svc-4j5jk + 
Aug 24 12:49:39.131: INFO: Created: latency-svc-4fmm7 + Aug 24 12:49:39.132: INFO: Created: latency-svc-9sgmk + Aug 24 12:49:39.132: INFO: Created: latency-svc-p76hw + Aug 24 12:49:39.135: INFO: Created: latency-svc-bpmlm + Aug 24 12:49:39.137: INFO: Created: latency-svc-xwxfq + Aug 24 12:49:39.137: INFO: Created: latency-svc-n7bpk + Aug 24 12:49:39.138: INFO: Created: latency-svc-zl9tb + Aug 24 12:49:39.140: INFO: Created: latency-svc-jq2hc + Aug 24 12:49:39.140: INFO: Created: latency-svc-tc2df + Aug 24 12:49:39.142: INFO: Created: latency-svc-8ntmw + Aug 24 12:49:39.172: INFO: Got endpoints: latency-svc-2pq94 [555.98563ms] + Aug 24 12:49:39.173: INFO: Got endpoints: latency-svc-4j5jk [267.366666ms] + Aug 24 12:49:39.178: INFO: Got endpoints: latency-svc-ttl6v [456.158586ms] + Aug 24 12:49:39.186: INFO: Got endpoints: latency-svc-cgwfk [420.848329ms] + Aug 24 12:49:39.215: INFO: Got endpoints: latency-svc-p76hw [524.373733ms] + Aug 24 12:49:39.226: INFO: Got endpoints: latency-svc-4fmm7 [395.646699ms] + Aug 24 12:49:39.227: INFO: Got endpoints: latency-svc-9sgmk [403.464416ms] + Aug 24 12:49:39.228: INFO: Got endpoints: latency-svc-qnbdz [369.231289ms] + Aug 24 12:49:39.232: INFO: Got endpoints: latency-svc-xwxfq [473.879001ms] + Aug 24 12:49:39.276: INFO: Created: latency-svc-b7x6g + Aug 24 12:49:39.293: INFO: Got endpoints: latency-svc-n7bpk [444.71012ms] + Aug 24 12:49:39.295: INFO: Got endpoints: latency-svc-zl9tb [634.312931ms] + Aug 24 12:49:39.296: INFO: Got endpoints: latency-svc-8ntmw [562.625256ms] + Aug 24 12:49:39.299: INFO: Got endpoints: latency-svc-bpmlm [513.680755ms] + Aug 24 12:49:39.299: INFO: Got endpoints: latency-svc-jq2hc [683.271252ms] + Aug 24 12:49:39.322: INFO: Got endpoints: latency-svc-tc2df [439.362204ms] + Aug 24 12:49:39.330: INFO: Created: latency-svc-jj5s8 + Aug 24 12:49:39.341: INFO: Got endpoints: latency-svc-b7x6g [168.904394ms] + Aug 24 12:49:39.342: INFO: Got endpoints: latency-svc-jj5s8 [126.951742ms] + Aug 24 12:49:39.363: INFO: Created: latency-svc-lgnq4 + Aug 24 12:49:39.369: INFO: Got endpoints: latency-svc-lgnq4 [143.417106ms] + Aug 24 12:49:39.380: INFO: Created: latency-svc-2nvb4 + Aug 24 12:49:39.404: INFO: Got endpoints: latency-svc-2nvb4 [226.139178ms] + Aug 24 12:49:39.424: INFO: Created: latency-svc-cqdzp + Aug 24 12:49:39.440: INFO: Got endpoints: latency-svc-cqdzp [254.217879ms] + Aug 24 12:49:39.496: INFO: Created: latency-svc-4n8sm + Aug 24 12:49:39.498: INFO: Got endpoints: latency-svc-4n8sm [324.455077ms] + Aug 24 12:49:39.517: INFO: Created: latency-svc-lr4bp + Aug 24 12:49:39.525: INFO: Got endpoints: latency-svc-lr4bp [297.939326ms] + Aug 24 12:49:39.547: INFO: Created: latency-svc-q7cw8 + Aug 24 12:49:39.560: INFO: Got endpoints: latency-svc-q7cw8 [327.216494ms] + Aug 24 12:49:39.574: INFO: Created: latency-svc-jdjkr + Aug 24 12:49:39.587: INFO: Got endpoints: latency-svc-jdjkr [358.332465ms] + Aug 24 12:49:39.615: INFO: Created: latency-svc-6k7sw + Aug 24 12:49:39.630: INFO: Got endpoints: latency-svc-6k7sw [334.957207ms] + Aug 24 12:49:39.638: INFO: Created: latency-svc-6ghm9 + Aug 24 12:49:39.664: INFO: Got endpoints: latency-svc-6ghm9 [364.161732ms] + Aug 24 12:49:39.682: INFO: Created: latency-svc-4pf74 + Aug 24 12:49:39.709: INFO: Created: latency-svc-s5zj4 + Aug 24 12:49:39.713: INFO: Got endpoints: latency-svc-4pf74 [419.678946ms] + Aug 24 12:49:39.722: INFO: Created: latency-svc-qw78k + Aug 24 12:49:39.734: INFO: Got endpoints: latency-svc-s5zj4 [438.756726ms] + Aug 24 12:49:39.755: INFO: Created: latency-svc-nfh8g + 
Aug 24 12:49:39.759: INFO: Got endpoints: latency-svc-qw78k [459.622892ms] + Aug 24 12:49:39.766: INFO: Got endpoints: latency-svc-nfh8g [443.892767ms] + Aug 24 12:49:39.786: INFO: Created: latency-svc-6qghf + Aug 24 12:49:39.797: INFO: Created: latency-svc-w7knl + Aug 24 12:49:39.803: INFO: Got endpoints: latency-svc-6qghf [461.754553ms] + Aug 24 12:49:39.818: INFO: Got endpoints: latency-svc-w7knl [476.051439ms] + Aug 24 12:49:39.840: INFO: Created: latency-svc-f9skq + Aug 24 12:49:39.846: INFO: Got endpoints: latency-svc-f9skq [476.162731ms] + Aug 24 12:49:39.994: INFO: Created: latency-svc-cm8pj + Aug 24 12:49:39.995: INFO: Created: latency-svc-kdtct + Aug 24 12:49:40.001: INFO: Created: latency-svc-8zw9x + Aug 24 12:49:40.002: INFO: Created: latency-svc-2bq8t + Aug 24 12:49:40.032: INFO: Created: latency-svc-jh87n + Aug 24 12:49:40.033: INFO: Created: latency-svc-cxjkm + Aug 24 12:49:40.034: INFO: Created: latency-svc-52g2z + Aug 24 12:49:40.034: INFO: Created: latency-svc-zg5fc + Aug 24 12:49:40.034: INFO: Created: latency-svc-n2r5t + Aug 24 12:49:40.035: INFO: Created: latency-svc-b46mh + Aug 24 12:49:40.034: INFO: Created: latency-svc-k7hjk + Aug 24 12:49:40.035: INFO: Created: latency-svc-bdlhc + Aug 24 12:49:40.036: INFO: Created: latency-svc-sbk4j + Aug 24 12:49:40.036: INFO: Created: latency-svc-nbrhv + Aug 24 12:49:40.037: INFO: Created: latency-svc-5s86b + Aug 24 12:49:40.086: INFO: Got endpoints: latency-svc-cm8pj [422.2223ms] + Aug 24 12:49:40.112: INFO: Got endpoints: latency-svc-k7hjk [266.254622ms] + Aug 24 12:49:40.113: INFO: Got endpoints: latency-svc-2bq8t [482.3205ms] + Aug 24 12:49:40.114: INFO: Got endpoints: latency-svc-kdtct [525.576932ms] + Aug 24 12:49:40.114: INFO: Got endpoints: latency-svc-8zw9x [673.462216ms] + Aug 24 12:49:40.135: INFO: Got endpoints: latency-svc-cxjkm [730.239621ms] + Aug 24 12:49:40.163: INFO: Got endpoints: latency-svc-jh87n [449.721282ms] + Aug 24 12:49:40.171: INFO: Created: latency-svc-vkkhs + Aug 24 12:49:40.203: INFO: Got endpoints: latency-svc-sbk4j [399.618346ms] + Aug 24 12:49:40.205: INFO: Got endpoints: latency-svc-5s86b [470.635399ms] + Aug 24 12:49:40.207: INFO: Got endpoints: latency-svc-n2r5t [709.105444ms] + Aug 24 12:49:40.209: INFO: Got endpoints: latency-svc-52g2z [442.562917ms] + Aug 24 12:49:40.215: INFO: Created: latency-svc-g56hc + Aug 24 12:49:40.221: INFO: Got endpoints: latency-svc-zg5fc [695.078929ms] + Aug 24 12:49:40.221: INFO: Got endpoints: latency-svc-b46mh [462.625648ms] + Aug 24 12:49:40.237: INFO: Created: latency-svc-qzqhl + Aug 24 12:49:40.240: INFO: Got endpoints: latency-svc-nbrhv [421.873402ms] + Aug 24 12:49:40.240: INFO: Got endpoints: latency-svc-bdlhc [680.53894ms] + Aug 24 12:49:40.249: INFO: Created: latency-svc-xdsdp + Aug 24 12:49:40.251: INFO: Got endpoints: latency-svc-vkkhs [165.530931ms] + Aug 24 12:49:40.261: INFO: Created: latency-svc-6p87w + Aug 24 12:49:40.271: INFO: Created: latency-svc-7d86s + Aug 24 12:49:40.296: INFO: Got endpoints: latency-svc-g56hc [183.093192ms] + Aug 24 12:49:40.346: INFO: Got endpoints: latency-svc-qzqhl [232.327456ms] + Aug 24 12:49:40.396: INFO: Got endpoints: latency-svc-xdsdp [283.06359ms] + Aug 24 12:49:40.430: INFO: Created: latency-svc-b9zg7 + Aug 24 12:49:40.430: INFO: Created: latency-svc-mn5zq + Aug 24 12:49:40.434: INFO: Created: latency-svc-cqd2t + Aug 24 12:49:40.455: INFO: Got endpoints: latency-svc-6p87w [341.10324ms] + Aug 24 12:49:40.466: INFO: Created: latency-svc-znb8r + Aug 24 12:49:40.479: INFO: Created: latency-svc-ksvf5 + Aug 24 
12:49:40.479: INFO: Created: latency-svc-xkwcp + Aug 24 12:49:40.480: INFO: Created: latency-svc-zlnjs + Aug 24 12:49:40.480: INFO: Created: latency-svc-b8skg + Aug 24 12:49:40.481: INFO: Created: latency-svc-bfl6x + Aug 24 12:49:40.481: INFO: Created: latency-svc-9kc8c + Aug 24 12:49:40.482: INFO: Created: latency-svc-rzzr5 + Aug 24 12:49:40.491: INFO: Created: latency-svc-kvpxf + Aug 24 12:49:40.492: INFO: Created: latency-svc-xvbp6 + Aug 24 12:49:40.504: INFO: Got endpoints: latency-svc-7d86s [368.862262ms] + Aug 24 12:49:40.516: INFO: Created: latency-svc-56g2n + Aug 24 12:49:40.532: INFO: Created: latency-svc-jmp46 + Aug 24 12:49:40.548: INFO: Got endpoints: latency-svc-mn5zq [296.066397ms] + Aug 24 12:49:40.568: INFO: Created: latency-svc-rbb2n + Aug 24 12:49:40.595: INFO: Got endpoints: latency-svc-b9zg7 [431.110147ms] + Aug 24 12:49:40.613: INFO: Created: latency-svc-d2dsr + Aug 24 12:49:40.653: INFO: Got endpoints: latency-svc-cqd2t [450.566273ms] + Aug 24 12:49:40.677: INFO: Created: latency-svc-kp989 + Aug 24 12:49:40.703: INFO: Got endpoints: latency-svc-znb8r [406.392154ms] + Aug 24 12:49:40.733: INFO: Created: latency-svc-czrlm + Aug 24 12:49:40.752: INFO: Got endpoints: latency-svc-xvbp6 [530.214258ms] + Aug 24 12:49:40.779: INFO: Created: latency-svc-nttk4 + Aug 24 12:49:40.799: INFO: Got endpoints: latency-svc-kvpxf [589.786908ms] + Aug 24 12:49:40.821: INFO: Created: latency-svc-8rfmr + Aug 24 12:49:40.847: INFO: Got endpoints: latency-svc-zlnjs [451.511288ms] + Aug 24 12:49:40.875: INFO: Created: latency-svc-xsbls + Aug 24 12:49:40.896: INFO: Got endpoints: latency-svc-xkwcp [675.331847ms] + Aug 24 12:49:40.914: INFO: Created: latency-svc-54gxm + Aug 24 12:49:40.950: INFO: Got endpoints: latency-svc-rzzr5 [709.640934ms] + Aug 24 12:49:40.972: INFO: Created: latency-svc-4hdfk + Aug 24 12:49:40.994: INFO: Got endpoints: latency-svc-9kc8c [754.236635ms] + Aug 24 12:49:41.013: INFO: Created: latency-svc-sbt88 + Aug 24 12:49:41.052: INFO: Got endpoints: latency-svc-ksvf5 [844.367675ms] + Aug 24 12:49:41.082: INFO: Created: latency-svc-f42s8 + Aug 24 12:49:41.096: INFO: Got endpoints: latency-svc-b8skg [749.689492ms] + Aug 24 12:49:41.130: INFO: Created: latency-svc-fdpsf + Aug 24 12:49:41.145: INFO: Got endpoints: latency-svc-bfl6x [939.574799ms] + Aug 24 12:49:41.180: INFO: Created: latency-svc-khbt8 + Aug 24 12:49:41.195: INFO: Got endpoints: latency-svc-56g2n [739.944485ms] + Aug 24 12:49:41.225: INFO: Created: latency-svc-msjpv + Aug 24 12:49:41.247: INFO: Got endpoints: latency-svc-jmp46 [742.85621ms] + Aug 24 12:49:41.263: INFO: Created: latency-svc-rvlcq + Aug 24 12:49:41.294: INFO: Got endpoints: latency-svc-rbb2n [745.832041ms] + Aug 24 12:49:41.320: INFO: Created: latency-svc-zc8vx + Aug 24 12:49:41.353: INFO: Got endpoints: latency-svc-d2dsr [758.19094ms] + Aug 24 12:49:41.385: INFO: Created: latency-svc-n4s86 + Aug 24 12:49:41.392: INFO: Got endpoints: latency-svc-kp989 [738.286619ms] + Aug 24 12:49:41.429: INFO: Created: latency-svc-dktvt + Aug 24 12:49:41.446: INFO: Got endpoints: latency-svc-czrlm [742.966587ms] + Aug 24 12:49:41.473: INFO: Created: latency-svc-9dlw2 + Aug 24 12:49:41.497: INFO: Got endpoints: latency-svc-nttk4 [744.152701ms] + Aug 24 12:49:41.522: INFO: Created: latency-svc-gp4pk + Aug 24 12:49:41.542: INFO: Got endpoints: latency-svc-8rfmr [743.040996ms] + Aug 24 12:49:41.560: INFO: Created: latency-svc-wwljq + Aug 24 12:49:41.595: INFO: Got endpoints: latency-svc-xsbls [747.613239ms] + Aug 24 12:49:41.621: INFO: Created: latency-svc-56wvd + 
Aug 24 12:49:41.653: INFO: Got endpoints: latency-svc-54gxm [756.857646ms] + Aug 24 12:49:41.674: INFO: Created: latency-svc-6hqlh + Aug 24 12:49:41.702: INFO: Got endpoints: latency-svc-4hdfk [751.972848ms] + Aug 24 12:49:41.724: INFO: Created: latency-svc-4pmxr + Aug 24 12:49:41.746: INFO: Got endpoints: latency-svc-sbt88 [752.031469ms] + Aug 24 12:49:41.783: INFO: Created: latency-svc-9cgvj + Aug 24 12:49:41.797: INFO: Got endpoints: latency-svc-f42s8 [745.202476ms] + Aug 24 12:49:41.814: INFO: Created: latency-svc-hprcf + Aug 24 12:49:41.848: INFO: Got endpoints: latency-svc-fdpsf [751.342216ms] + Aug 24 12:49:41.874: INFO: Created: latency-svc-h59vd + Aug 24 12:49:41.900: INFO: Got endpoints: latency-svc-khbt8 [754.894792ms] + Aug 24 12:49:41.928: INFO: Created: latency-svc-fxzp4 + Aug 24 12:49:41.949: INFO: Got endpoints: latency-svc-msjpv [753.692722ms] + Aug 24 12:49:41.972: INFO: Created: latency-svc-l78qs + Aug 24 12:49:41.992: INFO: Got endpoints: latency-svc-rvlcq [745.388433ms] + Aug 24 12:49:42.015: INFO: Created: latency-svc-jdf4t + Aug 24 12:49:42.057: INFO: Got endpoints: latency-svc-zc8vx [762.980733ms] + Aug 24 12:49:42.109: INFO: Got endpoints: latency-svc-n4s86 [755.333375ms] + Aug 24 12:49:42.116: INFO: Created: latency-svc-m99q8 + Aug 24 12:49:42.132: INFO: Created: latency-svc-v2gk8 + Aug 24 12:49:42.148: INFO: Got endpoints: latency-svc-dktvt [756.328611ms] + Aug 24 12:49:42.209: INFO: Got endpoints: latency-svc-9dlw2 [762.193337ms] + Aug 24 12:49:42.218: INFO: Created: latency-svc-txcpg + Aug 24 12:49:42.245: INFO: Created: latency-svc-xk89s + Aug 24 12:49:42.257: INFO: Got endpoints: latency-svc-gp4pk [759.561923ms] + Aug 24 12:49:42.276: INFO: Created: latency-svc-pxlkb + Aug 24 12:49:42.299: INFO: Got endpoints: latency-svc-wwljq [756.892822ms] + Aug 24 12:49:42.318: INFO: Created: latency-svc-xlql7 + Aug 24 12:49:42.350: INFO: Got endpoints: latency-svc-56wvd [755.004249ms] + Aug 24 12:49:42.376: INFO: Created: latency-svc-7br9g + Aug 24 12:49:42.400: INFO: Got endpoints: latency-svc-6hqlh [746.248804ms] + Aug 24 12:49:42.417: INFO: Created: latency-svc-2h4m5 + Aug 24 12:49:42.442: INFO: Got endpoints: latency-svc-4pmxr [740.170969ms] + Aug 24 12:49:42.461: INFO: Created: latency-svc-2frd7 + Aug 24 12:49:42.499: INFO: Got endpoints: latency-svc-9cgvj [752.447734ms] + Aug 24 12:49:42.530: INFO: Created: latency-svc-xz6sh + Aug 24 12:49:42.550: INFO: Got endpoints: latency-svc-hprcf [753.024943ms] + Aug 24 12:49:42.569: INFO: Created: latency-svc-5q4vg + Aug 24 12:49:42.599: INFO: Got endpoints: latency-svc-h59vd [750.879335ms] + Aug 24 12:49:42.622: INFO: Created: latency-svc-hr77d + Aug 24 12:49:42.646: INFO: Got endpoints: latency-svc-fxzp4 [745.462525ms] + Aug 24 12:49:42.707: INFO: Got endpoints: latency-svc-l78qs [756.057255ms] + Aug 24 12:49:42.718: INFO: Created: latency-svc-n7zd5 + Aug 24 12:49:42.746: INFO: Created: latency-svc-z2gfh + Aug 24 12:49:42.751: INFO: Got endpoints: latency-svc-jdf4t [758.676096ms] + Aug 24 12:49:42.774: INFO: Created: latency-svc-9xmn9 + Aug 24 12:49:42.807: INFO: Got endpoints: latency-svc-m99q8 [749.83088ms] + Aug 24 12:49:42.852: INFO: Created: latency-svc-sgtkq + Aug 24 12:49:42.865: INFO: Got endpoints: latency-svc-v2gk8 [756.461061ms] + Aug 24 12:49:42.889: INFO: Created: latency-svc-6khsb + Aug 24 12:49:42.898: INFO: Got endpoints: latency-svc-txcpg [749.576614ms] + Aug 24 12:49:42.923: INFO: Created: latency-svc-vcwz5 + Aug 24 12:49:42.950: INFO: Got endpoints: latency-svc-xk89s [741.281238ms] + Aug 24 12:49:42.973: 
INFO: Created: latency-svc-gbrqx + Aug 24 12:49:42.994: INFO: Got endpoints: latency-svc-pxlkb [736.268587ms] + Aug 24 12:49:43.020: INFO: Created: latency-svc-jc6k7 + Aug 24 12:49:43.055: INFO: Got endpoints: latency-svc-xlql7 [754.968091ms] + Aug 24 12:49:43.078: INFO: Created: latency-svc-nql7r + Aug 24 12:49:43.103: INFO: Got endpoints: latency-svc-7br9g [752.897191ms] + Aug 24 12:49:43.126: INFO: Created: latency-svc-kcmpw + Aug 24 12:49:43.152: INFO: Got endpoints: latency-svc-2h4m5 [752.467021ms] + Aug 24 12:49:43.170: INFO: Created: latency-svc-xzbw4 + Aug 24 12:49:43.197: INFO: Got endpoints: latency-svc-2frd7 [754.149943ms] + Aug 24 12:49:43.220: INFO: Created: latency-svc-4r46w + Aug 24 12:49:43.247: INFO: Got endpoints: latency-svc-xz6sh [747.252052ms] + Aug 24 12:49:43.270: INFO: Created: latency-svc-hv7xk + Aug 24 12:49:43.297: INFO: Got endpoints: latency-svc-5q4vg [746.171399ms] + Aug 24 12:49:43.316: INFO: Created: latency-svc-q7q5c + Aug 24 12:49:43.351: INFO: Got endpoints: latency-svc-hr77d [750.983062ms] + Aug 24 12:49:43.388: INFO: Created: latency-svc-tqshm + Aug 24 12:49:43.433: INFO: Got endpoints: latency-svc-n7zd5 [786.630555ms] + Aug 24 12:49:43.447: INFO: Got endpoints: latency-svc-z2gfh [739.644572ms] + Aug 24 12:49:43.474: INFO: Created: latency-svc-kg7nq + Aug 24 12:49:43.486: INFO: Created: latency-svc-7qtxw + Aug 24 12:49:43.498: INFO: Got endpoints: latency-svc-9xmn9 [746.96027ms] + Aug 24 12:49:43.526: INFO: Created: latency-svc-w7czn + Aug 24 12:49:43.543: INFO: Got endpoints: latency-svc-sgtkq [735.955632ms] + Aug 24 12:49:43.565: INFO: Created: latency-svc-h68hb + Aug 24 12:49:43.605: INFO: Got endpoints: latency-svc-6khsb [739.828753ms] + Aug 24 12:49:43.628: INFO: Created: latency-svc-ggtpz + Aug 24 12:49:43.650: INFO: Got endpoints: latency-svc-vcwz5 [751.903141ms] + Aug 24 12:49:43.678: INFO: Created: latency-svc-rjg8m + Aug 24 12:49:43.704: INFO: Got endpoints: latency-svc-gbrqx [753.434976ms] + Aug 24 12:49:43.729: INFO: Created: latency-svc-5x2gz + Aug 24 12:49:43.753: INFO: Got endpoints: latency-svc-jc6k7 [759.513432ms] + Aug 24 12:49:43.789: INFO: Created: latency-svc-dsfcp + Aug 24 12:49:43.805: INFO: Got endpoints: latency-svc-nql7r [749.79641ms] + Aug 24 12:49:43.834: INFO: Created: latency-svc-929rk + Aug 24 12:49:43.845: INFO: Got endpoints: latency-svc-kcmpw [741.353014ms] + Aug 24 12:49:43.876: INFO: Created: latency-svc-2q628 + Aug 24 12:49:43.897: INFO: Got endpoints: latency-svc-xzbw4 [744.76965ms] + Aug 24 12:49:43.925: INFO: Created: latency-svc-mh9cq + Aug 24 12:49:43.947: INFO: Got endpoints: latency-svc-4r46w [750.304479ms] + Aug 24 12:49:43.972: INFO: Created: latency-svc-g7grs + Aug 24 12:49:44.003: INFO: Got endpoints: latency-svc-hv7xk [755.463143ms] + Aug 24 12:49:44.034: INFO: Created: latency-svc-lflqk + Aug 24 12:49:44.058: INFO: Got endpoints: latency-svc-q7q5c [761.426379ms] + Aug 24 12:49:44.093: INFO: Created: latency-svc-jdwxq + Aug 24 12:49:44.097: INFO: Got endpoints: latency-svc-tqshm [745.363757ms] + Aug 24 12:49:44.124: INFO: Created: latency-svc-hszqz + Aug 24 12:49:44.158: INFO: Got endpoints: latency-svc-kg7nq [725.583608ms] + Aug 24 12:49:44.180: INFO: Created: latency-svc-drq8k + Aug 24 12:49:44.194: INFO: Got endpoints: latency-svc-7qtxw [747.369181ms] + Aug 24 12:49:44.240: INFO: Created: latency-svc-2892g + Aug 24 12:49:44.254: INFO: Got endpoints: latency-svc-w7czn [755.818828ms] + Aug 24 12:49:44.272: INFO: Created: latency-svc-qxjhd + Aug 24 12:49:44.296: INFO: Got endpoints: latency-svc-h68hb 
[752.325984ms] + Aug 24 12:49:44.315: INFO: Created: latency-svc-nd78f + Aug 24 12:49:44.359: INFO: Got endpoints: latency-svc-ggtpz [753.546815ms] + Aug 24 12:49:44.379: INFO: Created: latency-svc-66rlh + Aug 24 12:49:44.405: INFO: Got endpoints: latency-svc-rjg8m [754.524584ms] + Aug 24 12:49:44.438: INFO: Created: latency-svc-97xcb + Aug 24 12:49:44.449: INFO: Got endpoints: latency-svc-5x2gz [745.244082ms] + Aug 24 12:49:44.477: INFO: Created: latency-svc-6pwhl + Aug 24 12:49:44.502: INFO: Got endpoints: latency-svc-dsfcp [747.803982ms] + Aug 24 12:49:44.529: INFO: Created: latency-svc-m2lrw + Aug 24 12:49:44.550: INFO: Got endpoints: latency-svc-929rk [744.596411ms] + Aug 24 12:49:44.571: INFO: Created: latency-svc-c925k + Aug 24 12:49:44.600: INFO: Got endpoints: latency-svc-2q628 [754.73698ms] + Aug 24 12:49:44.623: INFO: Created: latency-svc-jd24p + Aug 24 12:49:44.648: INFO: Got endpoints: latency-svc-mh9cq [751.037493ms] + Aug 24 12:49:44.670: INFO: Created: latency-svc-wkm9h + Aug 24 12:49:44.695: INFO: Got endpoints: latency-svc-g7grs [748.161432ms] + Aug 24 12:49:44.728: INFO: Created: latency-svc-f48jq + Aug 24 12:49:44.749: INFO: Got endpoints: latency-svc-lflqk [746.388308ms] + Aug 24 12:49:44.770: INFO: Created: latency-svc-hs8z4 + Aug 24 12:49:44.796: INFO: Got endpoints: latency-svc-jdwxq [737.443036ms] + Aug 24 12:49:44.818: INFO: Created: latency-svc-bt6x2 + Aug 24 12:49:44.850: INFO: Got endpoints: latency-svc-hszqz [752.852564ms] + Aug 24 12:49:44.887: INFO: Created: latency-svc-dq2jh + Aug 24 12:49:44.896: INFO: Got endpoints: latency-svc-drq8k [736.948578ms] + Aug 24 12:49:44.913: INFO: Created: latency-svc-xtsbz + Aug 24 12:49:44.950: INFO: Got endpoints: latency-svc-2892g [755.445596ms] + Aug 24 12:49:44.978: INFO: Created: latency-svc-qk2n7 + Aug 24 12:49:44.995: INFO: Got endpoints: latency-svc-qxjhd [740.452097ms] + Aug 24 12:49:45.019: INFO: Created: latency-svc-j87hp + Aug 24 12:49:45.048: INFO: Got endpoints: latency-svc-nd78f [752.085646ms] + Aug 24 12:49:45.067: INFO: Created: latency-svc-2b8rl + Aug 24 12:49:45.099: INFO: Got endpoints: latency-svc-66rlh [739.046774ms] + Aug 24 12:49:45.158: INFO: Got endpoints: latency-svc-97xcb [752.557387ms] + Aug 24 12:49:45.171: INFO: Created: latency-svc-zct75 + Aug 24 12:49:45.191: INFO: Created: latency-svc-wwblm + Aug 24 12:49:45.199: INFO: Got endpoints: latency-svc-6pwhl [749.246766ms] + Aug 24 12:49:45.226: INFO: Created: latency-svc-fct7b + Aug 24 12:49:45.245: INFO: Got endpoints: latency-svc-m2lrw [742.048931ms] + Aug 24 12:49:45.267: INFO: Created: latency-svc-498gp + Aug 24 12:49:45.311: INFO: Got endpoints: latency-svc-c925k [761.795133ms] + Aug 24 12:49:45.332: INFO: Created: latency-svc-sgh82 + Aug 24 12:49:45.350: INFO: Got endpoints: latency-svc-jd24p [750.068854ms] + Aug 24 12:49:45.373: INFO: Created: latency-svc-xxd99 + Aug 24 12:49:45.398: INFO: Got endpoints: latency-svc-wkm9h [749.147848ms] + Aug 24 12:49:45.430: INFO: Created: latency-svc-ldrxd + Aug 24 12:49:45.447: INFO: Got endpoints: latency-svc-f48jq [751.105578ms] + Aug 24 12:49:45.473: INFO: Created: latency-svc-xgjx9 + Aug 24 12:49:45.501: INFO: Got endpoints: latency-svc-hs8z4 [751.601103ms] + Aug 24 12:49:45.524: INFO: Created: latency-svc-hp4d6 + Aug 24 12:49:45.549: INFO: Got endpoints: latency-svc-bt6x2 [753.324886ms] + Aug 24 12:49:45.570: INFO: Created: latency-svc-jftmq + Aug 24 12:49:45.594: INFO: Got endpoints: latency-svc-dq2jh [743.432288ms] + Aug 24 12:49:45.619: INFO: Created: latency-svc-smvpq + Aug 24 12:49:45.647: 
INFO: Got endpoints: latency-svc-xtsbz [750.772838ms] + Aug 24 12:49:45.671: INFO: Created: latency-svc-2wcwp + Aug 24 12:49:45.718: INFO: Got endpoints: latency-svc-qk2n7 [767.378734ms] + Aug 24 12:49:45.739: INFO: Created: latency-svc-2ch44 + Aug 24 12:49:45.754: INFO: Got endpoints: latency-svc-j87hp [758.924222ms] + Aug 24 12:49:45.787: INFO: Created: latency-svc-c7z45 + Aug 24 12:49:45.813: INFO: Got endpoints: latency-svc-2b8rl [763.926802ms] + Aug 24 12:49:45.848: INFO: Created: latency-svc-x2x6f + Aug 24 12:49:45.855: INFO: Got endpoints: latency-svc-zct75 [756.182524ms] + Aug 24 12:49:45.913: INFO: Created: latency-svc-h28gh + Aug 24 12:49:45.914: INFO: Got endpoints: latency-svc-wwblm [756.183161ms] + Aug 24 12:49:45.941: INFO: Created: latency-svc-vq79v + Aug 24 12:49:45.985: INFO: Got endpoints: latency-svc-fct7b [785.796909ms] + Aug 24 12:49:46.004: INFO: Got endpoints: latency-svc-498gp [758.129698ms] + Aug 24 12:49:46.015: INFO: Created: latency-svc-t252r + Aug 24 12:49:46.034: INFO: Created: latency-svc-j4xgp + Aug 24 12:49:46.050: INFO: Got endpoints: latency-svc-sgh82 [738.182833ms] + Aug 24 12:49:46.083: INFO: Created: latency-svc-g7mhd + Aug 24 12:49:46.105: INFO: Got endpoints: latency-svc-xxd99 [754.273066ms] + Aug 24 12:49:46.129: INFO: Created: latency-svc-vrvw8 + Aug 24 12:49:46.161: INFO: Got endpoints: latency-svc-ldrxd [762.750453ms] + Aug 24 12:49:46.186: INFO: Created: latency-svc-26qlb + Aug 24 12:49:46.195: INFO: Got endpoints: latency-svc-xgjx9 [748.424692ms] + Aug 24 12:49:46.225: INFO: Created: latency-svc-lrd4s + Aug 24 12:49:46.248: INFO: Got endpoints: latency-svc-hp4d6 [746.357612ms] + Aug 24 12:49:46.283: INFO: Created: latency-svc-wvrqs + Aug 24 12:49:46.296: INFO: Got endpoints: latency-svc-jftmq [746.142966ms] + Aug 24 12:49:46.315: INFO: Created: latency-svc-c6rhv + Aug 24 12:49:46.350: INFO: Got endpoints: latency-svc-smvpq [756.73964ms] + Aug 24 12:49:46.373: INFO: Created: latency-svc-mgpjc + Aug 24 12:49:46.397: INFO: Got endpoints: latency-svc-2wcwp [749.739289ms] + Aug 24 12:49:46.413: INFO: Created: latency-svc-8mbbv + Aug 24 12:49:46.447: INFO: Got endpoints: latency-svc-2ch44 [729.113656ms] + Aug 24 12:49:46.493: INFO: Got endpoints: latency-svc-c7z45 [739.214166ms] + Aug 24 12:49:46.549: INFO: Got endpoints: latency-svc-x2x6f [736.601131ms] + Aug 24 12:49:46.596: INFO: Got endpoints: latency-svc-h28gh [736.094386ms] + Aug 24 12:49:46.648: INFO: Got endpoints: latency-svc-vq79v [733.823164ms] + Aug 24 12:49:46.699: INFO: Got endpoints: latency-svc-t252r [713.915127ms] + Aug 24 12:49:46.745: INFO: Got endpoints: latency-svc-j4xgp [740.854057ms] + Aug 24 12:49:46.820: INFO: Got endpoints: latency-svc-g7mhd [769.818609ms] + Aug 24 12:49:46.851: INFO: Got endpoints: latency-svc-vrvw8 [746.065416ms] + Aug 24 12:49:46.903: INFO: Got endpoints: latency-svc-26qlb [742.370763ms] + Aug 24 12:49:46.956: INFO: Got endpoints: latency-svc-lrd4s [760.059827ms] + Aug 24 12:49:47.003: INFO: Got endpoints: latency-svc-wvrqs [754.527854ms] + Aug 24 12:49:47.058: INFO: Got endpoints: latency-svc-c6rhv [762.441084ms] + Aug 24 12:49:47.099: INFO: Got endpoints: latency-svc-mgpjc [748.122958ms] + Aug 24 12:49:47.156: INFO: Got endpoints: latency-svc-8mbbv [758.732316ms] + Aug 24 12:49:47.156: INFO: Latencies: [56.063584ms 84.157523ms 114.472563ms 125.146115ms 126.951742ms 143.417106ms 151.579914ms 156.814153ms 165.530931ms 168.904394ms 170.228686ms 183.093192ms 212.587727ms 219.626729ms 226.139178ms 232.327456ms 233.126352ms 243.49353ms 254.217879ms 
266.254622ms 267.366666ms 267.892004ms 283.06359ms 290.706541ms 296.066397ms 297.939326ms 324.455077ms 327.216494ms 334.957207ms 341.10324ms 358.332465ms 364.161732ms 368.862262ms 369.231289ms 395.646699ms 399.618346ms 403.464416ms 406.392154ms 419.678946ms 420.848329ms 421.873402ms 422.2223ms 431.110147ms 438.756726ms 439.362204ms 442.562917ms 443.892767ms 444.71012ms 449.721282ms 450.566273ms 451.511288ms 456.158586ms 459.622892ms 461.754553ms 462.625648ms 470.635399ms 473.879001ms 476.051439ms 476.162731ms 482.3205ms 513.680755ms 524.373733ms 525.576932ms 530.214258ms 555.98563ms 562.625256ms 589.786908ms 634.312931ms 673.462216ms 675.331847ms 680.53894ms 683.271252ms 695.078929ms 709.105444ms 709.640934ms 713.915127ms 725.583608ms 729.113656ms 730.239621ms 733.823164ms 735.955632ms 736.094386ms 736.268587ms 736.601131ms 736.948578ms 737.443036ms 738.182833ms 738.286619ms 739.046774ms 739.214166ms 739.644572ms 739.828753ms 739.944485ms 740.170969ms 740.452097ms 740.854057ms 741.281238ms 741.353014ms 742.048931ms 742.370763ms 742.85621ms 742.966587ms 743.040996ms 743.432288ms 744.152701ms 744.596411ms 744.76965ms 745.202476ms 745.244082ms 745.363757ms 745.388433ms 745.462525ms 745.832041ms 746.065416ms 746.142966ms 746.171399ms 746.248804ms 746.357612ms 746.388308ms 746.96027ms 747.252052ms 747.369181ms 747.613239ms 747.803982ms 748.122958ms 748.161432ms 748.424692ms 749.147848ms 749.246766ms 749.576614ms 749.689492ms 749.739289ms 749.79641ms 749.83088ms 750.068854ms 750.304479ms 750.772838ms 750.879335ms 750.983062ms 751.037493ms 751.105578ms 751.342216ms 751.601103ms 751.903141ms 751.972848ms 752.031469ms 752.085646ms 752.325984ms 752.447734ms 752.467021ms 752.557387ms 752.852564ms 752.897191ms 753.024943ms 753.324886ms 753.434976ms 753.546815ms 753.692722ms 754.149943ms 754.236635ms 754.273066ms 754.524584ms 754.527854ms 754.73698ms 754.894792ms 754.968091ms 755.004249ms 755.333375ms 755.445596ms 755.463143ms 755.818828ms 756.057255ms 756.182524ms 756.183161ms 756.328611ms 756.461061ms 756.73964ms 756.857646ms 756.892822ms 758.129698ms 758.19094ms 758.676096ms 758.732316ms 758.924222ms 759.513432ms 759.561923ms 760.059827ms 761.426379ms 761.795133ms 762.193337ms 762.441084ms 762.750453ms 762.980733ms 763.926802ms 767.378734ms 769.818609ms 785.796909ms 786.630555ms 844.367675ms 939.574799ms] + Aug 24 12:49:47.157: INFO: 50 %ile: 742.85621ms + Aug 24 12:49:47.157: INFO: 90 %ile: 758.19094ms + Aug 24 12:49:47.157: INFO: 99 %ile: 844.367675ms + Aug 24 12:49:47.157: INFO: Total sample count: 200 + [AfterEach] [sig-network] Service endpoints latency test/e2e/framework/node/init/init.go:32 - Jul 29 16:46:12.027: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Variable Expansion + Aug 24 12:49:47.157: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Service endpoints latency test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-network] Service endpoints latency dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-network] Service endpoints latency tear down framework | framework.go:193 - STEP: Destroying namespace "var-expansion-5998" for this suite. 07/29/23 16:46:12.038 + STEP: Destroying namespace "svc-latency-2999" for this suite. 
08/24/23 12:49:47.172 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-node] Probing container - should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:135 -[BeforeEach] [sig-node] Probing container +[sig-apps] Deployment + RecreateDeployment should delete old pods and create new ones [Conformance] + test/e2e/apps/deployment.go:113 +[BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:46:12.055 -Jul 29 16:46:12.055: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-probe 07/29/23 16:46:12.057 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:46:12.096 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:46:12.103 -[BeforeEach] [sig-node] Probing container +STEP: Creating a kubernetes client 08/24/23 12:49:47.188 +Aug 24 12:49:47.188: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename deployment 08/24/23 12:49:47.19 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:47.246 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:47.251 +[BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 -[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:135 -STEP: Creating pod busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2 in namespace container-probe-8758 07/29/23 16:46:12.109 -Jul 29 16:46:12.125: INFO: Waiting up to 5m0s for pod "busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2" in namespace "container-probe-8758" to be "not pending" -Jul 29 16:46:12.133: INFO: Pod "busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2": Phase="Pending", Reason="", readiness=false. Elapsed: 7.087783ms -Jul 29 16:46:14.143: INFO: Pod "busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.017614286s -Jul 29 16:46:14.143: INFO: Pod "busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2" satisfied condition "not pending" -Jul 29 16:46:14.143: INFO: Started pod busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2 in namespace container-probe-8758 -STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 16:46:14.143 -Jul 29 16:46:14.151: INFO: Initial restart count of pod busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2 is 0 -Jul 29 16:47:04.399: INFO: Restart count of pod container-probe-8758/busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2 is now 1 (50.247269638s elapsed) -STEP: deleting the pod 07/29/23 16:47:04.399 -[AfterEach] [sig-node] Probing container +[BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 +[It] RecreateDeployment should delete old pods and create new ones [Conformance] + test/e2e/apps/deployment.go:113 +Aug 24 12:49:47.265: INFO: Creating deployment "test-recreate-deployment" +Aug 24 12:49:47.287: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1 +Aug 24 12:49:47.305: INFO: deployment "test-recreate-deployment" doesn't have the required revision set +Aug 24 12:49:49.323: INFO: Waiting deployment "test-recreate-deployment" to complete +Aug 24 12:49:49.329: INFO: Triggering a new rollout for deployment "test-recreate-deployment" +Aug 24 12:49:49.345: INFO: Updating deployment test-recreate-deployment +Aug 24 12:49:49.345: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with olds pods +[AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 +Aug 24 12:49:49.530: INFO: Deployment "test-recreate-deployment": +&Deployment{ObjectMeta:{test-recreate-deployment deployment-1237 ddec4587-9af9-43ba-b44c-19d7ffe96f28 27887 2 2023-08-24 12:49:47 +0000 UTC map[name:sample-pod-3] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent 
SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0053fc538 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2023-08-24 12:49:49 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "test-recreate-deployment-cff6dc657" is progressing.,LastUpdateTime:2023-08-24 12:49:49 +0000 UTC,LastTransitionTime:2023-08-24 12:49:47 +0000 UTC,},},ReadyReplicas:0,CollisionCount:nil,},} + +Aug 24 12:49:49.536: INFO: New ReplicaSet "test-recreate-deployment-cff6dc657" of Deployment "test-recreate-deployment": +&ReplicaSet{ObjectMeta:{test-recreate-deployment-cff6dc657 deployment-1237 1531e081-47a4-4588-b765-01d4f8d9a459 27883 1 2023-08-24 12:49:49 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-recreate-deployment ddec4587-9af9-43ba-b44c-19d7ffe96f28 0xc0052c3d00 0xc0052c3d01}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ddec4587-9af9-43ba-b44c-19d7ffe96f28\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: cff6dc657,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent 
SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0052c3d98 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:49:49.536: INFO: All old ReplicaSets of Deployment "test-recreate-deployment": +Aug 24 12:49:49.537: INFO: &ReplicaSet{ObjectMeta:{test-recreate-deployment-795566c5cb deployment-1237 c1fa604a-627c-47d9-b206-b995bb271828 27874 2 2023-08-24 12:49:47 +0000 UTC map[name:sample-pod-3 pod-template-hash:795566c5cb] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-recreate-deployment ddec4587-9af9-43ba-b44c-19d7ffe96f28 0xc0052c3be7 0xc0052c3be8}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ddec4587-9af9-43ba-b44c-19d7ffe96f28\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 795566c5cb,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:795566c5cb] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0052c3c98 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:49:49.548: INFO: Pod 
"test-recreate-deployment-cff6dc657-z7glk" is not available: +&Pod{ObjectMeta:{test-recreate-deployment-cff6dc657-z7glk test-recreate-deployment-cff6dc657- deployment-1237 31479ae4-904d-4faa-aa06-6c9d7c4a1229 27885 0 2023-08-24 12:49:49 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[] [{apps/v1 ReplicaSet test-recreate-deployment-cff6dc657 1531e081-47a4-4588-b765-01d4f8d9a459 0xc00545e210 0xc00545e211}] [] [{kube-controller-manager Update v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1531e081-47a4-4588-b765-01d4f8d9a459\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-874kp,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-874kp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnly
RootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:,StartTime:2023-08-24 12:49:49 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 -Jul 29 16:47:04.425: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Probing container +Aug 24 12:49:49.549: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Probing container +[DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 -[DeferCleanup 
(Each)] [sig-node] Probing container +[DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 -STEP: Destroying namespace "container-probe-8758" for this suite. 07/29/23 16:47:04.439 +STEP: Destroying namespace "deployment-1237" for this suite. 08/24/23 12:49:49.573 ------------------------------ -• [SLOW TEST] [52.418 seconds] -[sig-node] Probing container -test/e2e/common/node/framework.go:23 - should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:135 +• [2.403 seconds] +[sig-apps] Deployment +test/e2e/apps/framework.go:23 + RecreateDeployment should delete old pods and create new ones [Conformance] + test/e2e/apps/deployment.go:113 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Probing container + [BeforeEach] [sig-apps] Deployment set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:46:12.055 - Jul 29 16:46:12.055: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-probe 07/29/23 16:46:12.057 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:46:12.096 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:46:12.103 - [BeforeEach] [sig-node] Probing container + STEP: Creating a kubernetes client 08/24/23 12:49:47.188 + Aug 24 12:49:47.188: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename deployment 08/24/23 12:49:47.19 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:47.246 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:47.251 + [BeforeEach] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 - [It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:135 - STEP: Creating pod busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2 in namespace container-probe-8758 07/29/23 16:46:12.109 - Jul 29 16:46:12.125: INFO: Waiting up to 5m0s for pod "busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2" in namespace "container-probe-8758" to be "not pending" - Jul 29 16:46:12.133: INFO: Pod "busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2": Phase="Pending", Reason="", readiness=false. Elapsed: 7.087783ms - Jul 29 16:46:14.143: INFO: Pod "busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.017614286s - Jul 29 16:46:14.143: INFO: Pod "busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2" satisfied condition "not pending" - Jul 29 16:46:14.143: INFO: Started pod busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2 in namespace container-probe-8758 - STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 16:46:14.143 - Jul 29 16:46:14.151: INFO: Initial restart count of pod busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2 is 0 - Jul 29 16:47:04.399: INFO: Restart count of pod container-probe-8758/busybox-a0b17d09-e5bc-4a62-aea7-70bd9953bad2 is now 1 (50.247269638s elapsed) - STEP: deleting the pod 07/29/23 16:47:04.399 - [AfterEach] [sig-node] Probing container + [BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 + [It] RecreateDeployment should delete old pods and create new ones [Conformance] + test/e2e/apps/deployment.go:113 + Aug 24 12:49:47.265: INFO: Creating deployment "test-recreate-deployment" + Aug 24 12:49:47.287: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1 + Aug 24 12:49:47.305: INFO: deployment "test-recreate-deployment" doesn't have the required revision set + Aug 24 12:49:49.323: INFO: Waiting deployment "test-recreate-deployment" to complete + Aug 24 12:49:49.329: INFO: Triggering a new rollout for deployment "test-recreate-deployment" + Aug 24 12:49:49.345: INFO: Updating deployment test-recreate-deployment + Aug 24 12:49:49.345: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with olds pods + [AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 + Aug 24 12:49:49.530: INFO: Deployment "test-recreate-deployment": + &Deployment{ObjectMeta:{test-recreate-deployment deployment-1237 ddec4587-9af9-43ba-b44c-19d7ffe96f28 27887 2 2023-08-24 12:49:47 +0000 UTC map[name:sample-pod-3] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent 
SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0053fc538 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2023-08-24 12:49:49 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "test-recreate-deployment-cff6dc657" is progressing.,LastUpdateTime:2023-08-24 12:49:49 +0000 UTC,LastTransitionTime:2023-08-24 12:49:47 +0000 UTC,},},ReadyReplicas:0,CollisionCount:nil,},} + + Aug 24 12:49:49.536: INFO: New ReplicaSet "test-recreate-deployment-cff6dc657" of Deployment "test-recreate-deployment": + &ReplicaSet{ObjectMeta:{test-recreate-deployment-cff6dc657 deployment-1237 1531e081-47a4-4588-b765-01d4f8d9a459 27883 1 2023-08-24 12:49:49 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-recreate-deployment ddec4587-9af9-43ba-b44c-19d7ffe96f28 0xc0052c3d00 0xc0052c3d01}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ddec4587-9af9-43ba-b44c-19d7ffe96f28\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: cff6dc657,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent 
SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0052c3d98 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:49:49.536: INFO: All old ReplicaSets of Deployment "test-recreate-deployment": + Aug 24 12:49:49.537: INFO: &ReplicaSet{ObjectMeta:{test-recreate-deployment-795566c5cb deployment-1237 c1fa604a-627c-47d9-b206-b995bb271828 27874 2 2023-08-24 12:49:47 +0000 UTC map[name:sample-pod-3 pod-template-hash:795566c5cb] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-recreate-deployment ddec4587-9af9-43ba-b44c-19d7ffe96f28 0xc0052c3be7 0xc0052c3be8}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"ddec4587-9af9-43ba-b44c-19d7ffe96f28\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 795566c5cb,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:795566c5cb] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0052c3c98 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:49:49.548: INFO: Pod 
"test-recreate-deployment-cff6dc657-z7glk" is not available: + &Pod{ObjectMeta:{test-recreate-deployment-cff6dc657-z7glk test-recreate-deployment-cff6dc657- deployment-1237 31479ae4-904d-4faa-aa06-6c9d7c4a1229 27885 0 2023-08-24 12:49:49 +0000 UTC map[name:sample-pod-3 pod-template-hash:cff6dc657] map[] [{apps/v1 ReplicaSet test-recreate-deployment-cff6dc657 1531e081-47a4-4588-b765-01d4f8d9a459 0xc00545e210 0xc00545e211}] [] [{kube-controller-manager Update v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1531e081-47a4-4588-b765-01d4f8d9a459\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:49:49 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-874kp,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-874kp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnl
yRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:49:49 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:,StartTime:2023-08-24 12:49:49 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + [AfterEach] [sig-apps] Deployment test/e2e/framework/node/init/init.go:32 - Jul 29 16:47:04.425: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Probing container + Aug 24 12:49:49.549: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Deployment test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] [sig-apps] Deployment dump namespaces | framework.go:196 - 
[DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] [sig-apps] Deployment tear down framework | framework.go:193 - STEP: Destroying namespace "container-probe-8758" for this suite. 07/29/23 16:47:04.439 + STEP: Destroying namespace "deployment-1237" for this suite. 08/24/23 12:49:49.573 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-apps] CronJob - should replace jobs when ReplaceConcurrent [Conformance] - test/e2e/apps/cronjob.go:160 -[BeforeEach] [sig-apps] CronJob +[sig-network] Services + should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] + test/e2e/network/service.go:2250 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:47:04.477 -Jul 29 16:47:04.477: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename cronjob 07/29/23 16:47:04.479 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:47:04.523 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:47:04.532 -[BeforeEach] [sig-apps] CronJob +STEP: Creating a kubernetes client 08/24/23 12:49:49.603 +Aug 24 12:49:49.604: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 12:49:49.616 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:49.641 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:49.646 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[It] should replace jobs when ReplaceConcurrent [Conformance] - test/e2e/apps/cronjob.go:160 -STEP: Creating a ReplaceConcurrent cronjob 07/29/23 16:47:04.54 -STEP: Ensuring a job is scheduled 07/29/23 16:47:04.553 -STEP: Ensuring exactly one is scheduled 07/29/23 16:48:00.561 -STEP: Ensuring exactly one running job exists by listing jobs explicitly 07/29/23 16:48:00.567 -STEP: Ensuring the job is replaced with a new one 07/29/23 16:48:00.573 -STEP: Removing cronjob 07/29/23 16:49:00.584 -[AfterEach] [sig-apps] CronJob +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] + test/e2e/network/service.go:2250 +STEP: creating service in namespace services-8936 08/24/23 12:49:49.651 +STEP: creating service affinity-nodeport-transition in namespace services-8936 08/24/23 12:49:49.651 +STEP: creating replication controller affinity-nodeport-transition in namespace services-8936 08/24/23 12:49:49.673 +I0824 12:49:49.690893 14 runners.go:193] Created replication controller with name: affinity-nodeport-transition, namespace: services-8936, replica count: 3 +I0824 12:49:52.742661 14 runners.go:193] affinity-nodeport-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Aug 24 12:49:52.764: INFO: Creating new exec pod +Aug 24 12:49:52.778: INFO: Waiting up to 5m0s for pod "execpod-affinitytd6g2" in namespace "services-8936" to be "running" +Aug 24 12:49:52.790: INFO: Pod "execpod-affinitytd6g2": Phase="Pending", Reason="", readiness=false. Elapsed: 11.503485ms +Aug 24 12:49:54.798: INFO: Pod "execpod-affinitytd6g2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.019656115s +Aug 24 12:49:54.798: INFO: Pod "execpod-affinitytd6g2" satisfied condition "running" +Aug 24 12:49:55.812: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c nc -v -z -w 2 affinity-nodeport-transition 80' +Aug 24 12:49:56.504: INFO: stderr: "+ nc -v -z -w 2 affinity-nodeport-transition 80\nConnection to affinity-nodeport-transition 80 port [tcp/http] succeeded!\n" +Aug 24 12:49:56.504: INFO: stdout: "" +Aug 24 12:49:56.506: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c nc -v -z -w 2 10.233.11.116 80' +Aug 24 12:49:57.076: INFO: stderr: "+ nc -v -z -w 2 10.233.11.116 80\nConnection to 10.233.11.116 80 port [tcp/http] succeeded!\n" +Aug 24 12:49:57.076: INFO: stdout: "" +Aug 24 12:49:57.077: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c nc -v -z -w 2 192.168.121.111 32222' +Aug 24 12:49:57.450: INFO: stderr: "+ nc -v -z -w 2 192.168.121.111 32222\nConnection to 192.168.121.111 32222 port [tcp/*] succeeded!\n" +Aug 24 12:49:57.450: INFO: stdout: "" +Aug 24 12:49:57.451: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c nc -v -z -w 2 192.168.121.127 32222' +Aug 24 12:49:57.939: INFO: stderr: "+ nc -v -z -w 2 192.168.121.127 32222\nConnection to 192.168.121.127 32222 port [tcp/*] succeeded!\n" +Aug 24 12:49:57.939: INFO: stdout: "" +Aug 24 12:49:57.963: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.127:32222/ ; done' +Aug 24 12:49:58.793: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n" +Aug 24 12:49:58.793: INFO: stdout: 
"\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-w552j\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-w552j\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-w552j\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-rtspk" +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-w552j +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-w552j +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-w552j +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd +Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:58.831: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.127:32222/ ; done' +Aug 24 12:49:59.741: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n" +Aug 24 12:49:59.745: INFO: 
stdout: "\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk" +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk +Aug 24 12:49:59.745: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-nodeport-transition in namespace services-8936, will wait for the garbage collector to delete the pods 08/24/23 12:49:59.803 +Aug 24 12:49:59.884: INFO: Deleting ReplicationController affinity-nodeport-transition took: 22.490455ms +Aug 24 12:50:00.886: INFO: Terminating ReplicationController affinity-nodeport-transition pods took: 1.001647703s +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 16:49:00.604: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] CronJob +Aug 24 12:50:03.346: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] CronJob +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] CronJob +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "cronjob-8500" for this suite. 07/29/23 16:49:00.619 +STEP: Destroying namespace "services-8936" for this suite. 
08/24/23 12:50:03.36 ------------------------------ -• [SLOW TEST] [116.159 seconds] -[sig-apps] CronJob -test/e2e/apps/framework.go:23 - should replace jobs when ReplaceConcurrent [Conformance] - test/e2e/apps/cronjob.go:160 +• [SLOW TEST] [13.784 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] + test/e2e/network/service.go:2250 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] CronJob + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:47:04.477 - Jul 29 16:47:04.477: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename cronjob 07/29/23 16:47:04.479 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:47:04.523 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:47:04.532 - [BeforeEach] [sig-apps] CronJob + STEP: Creating a kubernetes client 08/24/23 12:49:49.603 + Aug 24 12:49:49.604: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 12:49:49.616 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:49:49.641 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:49:49.646 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [It] should replace jobs when ReplaceConcurrent [Conformance] - test/e2e/apps/cronjob.go:160 - STEP: Creating a ReplaceConcurrent cronjob 07/29/23 16:47:04.54 - STEP: Ensuring a job is scheduled 07/29/23 16:47:04.553 - STEP: Ensuring exactly one is scheduled 07/29/23 16:48:00.561 - STEP: Ensuring exactly one running job exists by listing jobs explicitly 07/29/23 16:48:00.567 - STEP: Ensuring the job is replaced with a new one 07/29/23 16:48:00.573 - STEP: Removing cronjob 07/29/23 16:49:00.584 - [AfterEach] [sig-apps] CronJob + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] + test/e2e/network/service.go:2250 + STEP: creating service in namespace services-8936 08/24/23 12:49:49.651 + STEP: creating service affinity-nodeport-transition in namespace services-8936 08/24/23 12:49:49.651 + STEP: creating replication controller affinity-nodeport-transition in namespace services-8936 08/24/23 12:49:49.673 + I0824 12:49:49.690893 14 runners.go:193] Created replication controller with name: affinity-nodeport-transition, namespace: services-8936, replica count: 3 + I0824 12:49:52.742661 14 runners.go:193] affinity-nodeport-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady + Aug 24 12:49:52.764: INFO: Creating new exec pod + Aug 24 12:49:52.778: INFO: Waiting up to 5m0s for pod "execpod-affinitytd6g2" in namespace "services-8936" to be "running" + Aug 24 12:49:52.790: INFO: Pod "execpod-affinitytd6g2": Phase="Pending", Reason="", readiness=false. Elapsed: 11.503485ms + Aug 24 12:49:54.798: INFO: Pod "execpod-affinitytd6g2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.019656115s + Aug 24 12:49:54.798: INFO: Pod "execpod-affinitytd6g2" satisfied condition "running" + Aug 24 12:49:55.812: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c nc -v -z -w 2 affinity-nodeport-transition 80' + Aug 24 12:49:56.504: INFO: stderr: "+ nc -v -z -w 2 affinity-nodeport-transition 80\nConnection to affinity-nodeport-transition 80 port [tcp/http] succeeded!\n" + Aug 24 12:49:56.504: INFO: stdout: "" + Aug 24 12:49:56.506: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c nc -v -z -w 2 10.233.11.116 80' + Aug 24 12:49:57.076: INFO: stderr: "+ nc -v -z -w 2 10.233.11.116 80\nConnection to 10.233.11.116 80 port [tcp/http] succeeded!\n" + Aug 24 12:49:57.076: INFO: stdout: "" + Aug 24 12:49:57.077: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c nc -v -z -w 2 192.168.121.111 32222' + Aug 24 12:49:57.450: INFO: stderr: "+ nc -v -z -w 2 192.168.121.111 32222\nConnection to 192.168.121.111 32222 port [tcp/*] succeeded!\n" + Aug 24 12:49:57.450: INFO: stdout: "" + Aug 24 12:49:57.451: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c nc -v -z -w 2 192.168.121.127 32222' + Aug 24 12:49:57.939: INFO: stderr: "+ nc -v -z -w 2 192.168.121.127 32222\nConnection to 192.168.121.127 32222 port [tcp/*] succeeded!\n" + Aug 24 12:49:57.939: INFO: stdout: "" + Aug 24 12:49:57.963: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.127:32222/ ; done' + Aug 24 12:49:58.793: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n" + Aug 24 12:49:58.793: INFO: stdout: 
"\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-w552j\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-w552j\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-w552j\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-krljd\naffinity-nodeport-transition-rtspk" + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-w552j + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-w552j + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-w552j + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-krljd + Aug 24 12:49:58.793: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:58.831: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-8936 exec execpod-affinitytd6g2 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://192.168.121.127:32222/ ; done' + Aug 24 12:49:59.741: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n+ echo\n+ curl -q -s --connect-timeout 2 http://192.168.121.127:32222/\n" + Aug 24 
12:49:59.745: INFO: stdout: "\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk\naffinity-nodeport-transition-rtspk" + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Received response from host: affinity-nodeport-transition-rtspk + Aug 24 12:49:59.745: INFO: Cleaning up the exec pod + STEP: deleting ReplicationController affinity-nodeport-transition in namespace services-8936, will wait for the garbage collector to delete the pods 08/24/23 12:49:59.803 + Aug 24 12:49:59.884: INFO: Deleting ReplicationController affinity-nodeport-transition took: 22.490455ms + Aug 24 12:50:00.886: INFO: Terminating ReplicationController affinity-nodeport-transition pods took: 1.001647703s + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 16:49:00.604: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] CronJob + Aug 24 12:50:03.346: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] CronJob + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] CronJob + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "cronjob-8500" for this suite. 07/29/23 16:49:00.619 + STEP: Destroying namespace "services-8936" for this suite. 
08/24/23 12:50:03.36 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should create a ResourceQuota and capture the life of a replica set. [Conformance] - test/e2e/apimachinery/resource_quota.go:448 -[BeforeEach] [sig-api-machinery] ResourceQuota +[sig-node] InitContainer [NodeConformance] + should invoke init containers on a RestartAlways pod [Conformance] + test/e2e/common/node/init_container.go:255 +[BeforeEach] [sig-node] InitContainer [NodeConformance] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:49:00.638 -Jul 29 16:49:00.638: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 16:49:00.647 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:00.68 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:00.688 -[BeforeEach] [sig-api-machinery] ResourceQuota +STEP: Creating a kubernetes client 08/24/23 12:50:03.388 +Aug 24 12:50:03.388: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename init-container 08/24/23 12:50:03.401 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:03.447 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:03.454 +[BeforeEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:31 -[It] should create a ResourceQuota and capture the life of a replica set. [Conformance] - test/e2e/apimachinery/resource_quota.go:448 -STEP: Counting existing ResourceQuota 07/29/23 16:49:00.698 -STEP: Creating a ResourceQuota 07/29/23 16:49:05.715 -STEP: Ensuring resource quota status is calculated 07/29/23 16:49:05.746 -STEP: Creating a ReplicaSet 07/29/23 16:49:07.757 -STEP: Ensuring resource quota status captures replicaset creation 07/29/23 16:49:07.787 -STEP: Deleting a ReplicaSet 07/29/23 16:49:09.797 -STEP: Ensuring resource quota status released usage 07/29/23 16:49:09.81 -[AfterEach] [sig-api-machinery] ResourceQuota +[BeforeEach] [sig-node] InitContainer [NodeConformance] + test/e2e/common/node/init_container.go:165 +[It] should invoke init containers on a RestartAlways pod [Conformance] + test/e2e/common/node/init_container.go:255 +STEP: creating the pod 08/24/23 12:50:03.461 +Aug 24 12:50:03.462: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/node/init/init.go:32 -Jul 29 16:49:11.823: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 12:50:07.165: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-8876" for this suite. 07/29/23 16:49:11.838 +STEP: Destroying namespace "init-container-5226" for this suite. 
08/24/23 12:50:07.179 ------------------------------ -• [SLOW TEST] [11.223 seconds] -[sig-api-machinery] ResourceQuota -test/e2e/apimachinery/framework.go:23 - should create a ResourceQuota and capture the life of a replica set. [Conformance] - test/e2e/apimachinery/resource_quota.go:448 +• [3.806 seconds] +[sig-node] InitContainer [NodeConformance] +test/e2e/common/node/framework.go:23 + should invoke init containers on a RestartAlways pod [Conformance] + test/e2e/common/node/init_container.go:255 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-node] InitContainer [NodeConformance] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:49:00.638 - Jul 29 16:49:00.638: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 16:49:00.647 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:00.68 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:00.688 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 12:50:03.388 + Aug 24 12:50:03.388: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename init-container 08/24/23 12:50:03.401 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:03.447 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:03.454 + [BeforeEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:31 - [It] should create a ResourceQuota and capture the life of a replica set. [Conformance] - test/e2e/apimachinery/resource_quota.go:448 - STEP: Counting existing ResourceQuota 07/29/23 16:49:00.698 - STEP: Creating a ResourceQuota 07/29/23 16:49:05.715 - STEP: Ensuring resource quota status is calculated 07/29/23 16:49:05.746 - STEP: Creating a ReplicaSet 07/29/23 16:49:07.757 - STEP: Ensuring resource quota status captures replicaset creation 07/29/23 16:49:07.787 - STEP: Deleting a ReplicaSet 07/29/23 16:49:09.797 - STEP: Ensuring resource quota status released usage 07/29/23 16:49:09.81 - [AfterEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-node] InitContainer [NodeConformance] + test/e2e/common/node/init_container.go:165 + [It] should invoke init containers on a RestartAlways pod [Conformance] + test/e2e/common/node/init_container.go:255 + STEP: creating the pod 08/24/23 12:50:03.461 + Aug 24 12:50:03.462: INFO: PodSpec: initContainers in spec.initContainers + [AfterEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/node/init/init.go:32 - Jul 29 16:49:11.823: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 12:50:07.165: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-8876" for this suite. 07/29/23 16:49:11.838 + STEP: Destroying namespace "init-container-5226" for this suite. 
08/24/23 12:50:07.179 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSS ------------------------------ -[sig-node] PodTemplates - should delete a collection of pod templates [Conformance] - test/e2e/common/node/podtemplates.go:122 -[BeforeEach] [sig-node] PodTemplates +[sig-storage] Projected secret + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:78 +[BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:49:11.863 -Jul 29 16:49:11.863: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename podtemplate 07/29/23 16:49:11.866 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:11.956 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:11.963 -[BeforeEach] [sig-node] PodTemplates +STEP: Creating a kubernetes client 08/24/23 12:50:07.203 +Aug 24 12:50:07.204: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:50:07.205 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:07.236 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:07.241 +[BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 -[It] should delete a collection of pod templates [Conformance] - test/e2e/common/node/podtemplates.go:122 -STEP: Create set of pod templates 07/29/23 16:49:11.97 -Jul 29 16:49:11.984: INFO: created test-podtemplate-1 -Jul 29 16:49:11.997: INFO: created test-podtemplate-2 -Jul 29 16:49:12.009: INFO: created test-podtemplate-3 -STEP: get a list of pod templates with a label in the current namespace 07/29/23 16:49:12.009 -STEP: delete collection of pod templates 07/29/23 16:49:12.017 -Jul 29 16:49:12.018: INFO: requesting DeleteCollection of pod templates -STEP: check that the list of pod templates matches the requested quantity 07/29/23 16:49:12.062 -Jul 29 16:49:12.063: INFO: requesting list of pod templates to confirm quantity -[AfterEach] [sig-node] PodTemplates +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:78 +STEP: Creating projection with secret that has name projected-secret-test-map-f9b98797-562e-4578-baae-dc3b055f3e03 08/24/23 12:50:07.248 +STEP: Creating a pod to test consume secrets 08/24/23 12:50:07.256 +Aug 24 12:50:07.272: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a" in namespace "projected-7457" to be "Succeeded or Failed" +Aug 24 12:50:07.285: INFO: Pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a": Phase="Pending", Reason="", readiness=false. Elapsed: 12.792048ms +Aug 24 12:50:09.293: INFO: Pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020797044s +Aug 24 12:50:11.292: INFO: Pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01951191s +STEP: Saw pod success 08/24/23 12:50:11.292 +Aug 24 12:50:11.292: INFO: Pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a" satisfied condition "Succeeded or Failed" +Aug 24 12:50:11.297: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a container projected-secret-volume-test: +STEP: delete the pod 08/24/23 12:50:11.313 +Aug 24 12:50:11.335: INFO: Waiting for pod pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a to disappear +Aug 24 12:50:11.341: INFO: Pod pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a no longer exists +[AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 -Jul 29 16:49:12.087: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] PodTemplates +Aug 24 12:50:11.341: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] PodTemplates +[DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] PodTemplates +[DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 -STEP: Destroying namespace "podtemplate-912" for this suite. 07/29/23 16:49:12.102 +STEP: Destroying namespace "projected-7457" for this suite. 08/24/23 12:50:11.35 ------------------------------ -• [0.257 seconds] -[sig-node] PodTemplates -test/e2e/common/node/framework.go:23 - should delete a collection of pod templates [Conformance] - test/e2e/common/node/podtemplates.go:122 +• [4.172 seconds] +[sig-storage] Projected secret +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:78 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] PodTemplates + [BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:49:11.863 - Jul 29 16:49:11.863: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename podtemplate 07/29/23 16:49:11.866 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:11.956 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:11.963 - [BeforeEach] [sig-node] PodTemplates + STEP: Creating a kubernetes client 08/24/23 12:50:07.203 + Aug 24 12:50:07.204: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:50:07.205 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:07.236 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:07.241 + [BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 - [It] should delete a collection of pod templates [Conformance] - test/e2e/common/node/podtemplates.go:122 - STEP: Create set of pod templates 07/29/23 16:49:11.97 - Jul 29 16:49:11.984: INFO: created test-podtemplate-1 - Jul 29 16:49:11.997: INFO: created test-podtemplate-2 - Jul 29 16:49:12.009: INFO: created test-podtemplate-3 - STEP: get a list of pod templates with a label in the current namespace 07/29/23 16:49:12.009 - STEP: delete collection of pod templates 07/29/23 16:49:12.017 - Jul 29 16:49:12.018: INFO: requesting 
DeleteCollection of pod templates - STEP: check that the list of pod templates matches the requested quantity 07/29/23 16:49:12.062 - Jul 29 16:49:12.063: INFO: requesting list of pod templates to confirm quantity - [AfterEach] [sig-node] PodTemplates + [It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:78 + STEP: Creating projection with secret that has name projected-secret-test-map-f9b98797-562e-4578-baae-dc3b055f3e03 08/24/23 12:50:07.248 + STEP: Creating a pod to test consume secrets 08/24/23 12:50:07.256 + Aug 24 12:50:07.272: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a" in namespace "projected-7457" to be "Succeeded or Failed" + Aug 24 12:50:07.285: INFO: Pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a": Phase="Pending", Reason="", readiness=false. Elapsed: 12.792048ms + Aug 24 12:50:09.293: INFO: Pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020797044s + Aug 24 12:50:11.292: INFO: Pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01951191s + STEP: Saw pod success 08/24/23 12:50:11.292 + Aug 24 12:50:11.292: INFO: Pod "pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a" satisfied condition "Succeeded or Failed" + Aug 24 12:50:11.297: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a container projected-secret-volume-test: + STEP: delete the pod 08/24/23 12:50:11.313 + Aug 24 12:50:11.335: INFO: Waiting for pod pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a to disappear + Aug 24 12:50:11.341: INFO: Pod pod-projected-secrets-e29cdcfc-f967-4a69-a76f-ff448ee99f8a no longer exists + [AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 - Jul 29 16:49:12.087: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] PodTemplates + Aug 24 12:50:11.341: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] PodTemplates + [DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] PodTemplates + [DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 - STEP: Destroying namespace "podtemplate-912" for this suite. 07/29/23 16:49:12.102 + STEP: Destroying namespace "projected-7457" for this suite. 
08/24/23 12:50:11.35 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSS +SSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Pods - should get a host IP [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:204 -[BeforeEach] [sig-node] Pods +[sig-node] Variable Expansion + should allow substituting values in a volume subpath [Conformance] + test/e2e/common/node/expansion.go:112 +[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:49:12.122 -Jul 29 16:49:12.122: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 16:49:12.124 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:12.158 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:12.168 -[BeforeEach] [sig-node] Pods +STEP: Creating a kubernetes client 08/24/23 12:50:11.379 +Aug 24 12:50:11.380: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename var-expansion 08/24/23 12:50:11.382 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:11.417 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:11.423 +[BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should get a host IP [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:204 -STEP: creating pod 07/29/23 16:49:12.175 -Jul 29 16:49:12.206: INFO: Waiting up to 5m0s for pod "pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe" in namespace "pods-3162" to be "running and ready" -Jul 29 16:49:12.219: INFO: Pod "pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe": Phase="Pending", Reason="", readiness=false. Elapsed: 13.536626ms -Jul 29 16:49:12.220: INFO: The phase of Pod pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:49:14.227: INFO: Pod "pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe": Phase="Running", Reason="", readiness=true. Elapsed: 2.021268467s -Jul 29 16:49:14.227: INFO: The phase of Pod pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe is Running (Ready = true) -Jul 29 16:49:14.227: INFO: Pod "pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe" satisfied condition "running and ready" -Jul 29 16:49:14.242: INFO: Pod pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe has hostIP: 192.168.121.141 -[AfterEach] [sig-node] Pods +[It] should allow substituting values in a volume subpath [Conformance] + test/e2e/common/node/expansion.go:112 +STEP: Creating a pod to test substitution in volume subpath 08/24/23 12:50:11.426 +Aug 24 12:50:11.464: INFO: Waiting up to 5m0s for pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9" in namespace "var-expansion-7129" to be "Succeeded or Failed" +Aug 24 12:50:11.470: INFO: Pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9": Phase="Pending", Reason="", readiness=false. Elapsed: 5.509366ms +Aug 24 12:50:13.478: INFO: Pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013530736s +Aug 24 12:50:15.478: INFO: Pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.013468717s +STEP: Saw pod success 08/24/23 12:50:15.478 +Aug 24 12:50:15.479: INFO: Pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9" satisfied condition "Succeeded or Failed" +Aug 24 12:50:15.484: INFO: Trying to get logs from node pe9deep4seen-3 pod var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9 container dapi-container: +STEP: delete the pod 08/24/23 12:50:15.494 +Aug 24 12:50:15.516: INFO: Waiting for pod var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9 to disappear +Aug 24 12:50:15.522: INFO: Pod var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9 no longer exists +[AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 -Jul 29 16:49:14.242: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods +Aug 24 12:50:15.522: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 -STEP: Destroying namespace "pods-3162" for this suite. 07/29/23 16:49:14.251 +STEP: Destroying namespace "var-expansion-7129" for this suite. 08/24/23 12:50:15.531 ------------------------------ -• [2.143 seconds] -[sig-node] Pods +• [4.166 seconds] +[sig-node] Variable Expansion test/e2e/common/node/framework.go:23 - should get a host IP [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:204 + should allow substituting values in a volume subpath [Conformance] + test/e2e/common/node/expansion.go:112 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods + [BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:49:12.122 - Jul 29 16:49:12.122: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 16:49:12.124 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:12.158 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:12.168 - [BeforeEach] [sig-node] Pods + STEP: Creating a kubernetes client 08/24/23 12:50:11.379 + Aug 24 12:50:11.380: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename var-expansion 08/24/23 12:50:11.382 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:11.417 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:11.423 + [BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should get a host IP [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:204 - STEP: creating pod 07/29/23 16:49:12.175 - Jul 29 16:49:12.206: INFO: Waiting up to 5m0s for pod "pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe" in namespace "pods-3162" to be "running and ready" - Jul 29 16:49:12.219: INFO: Pod "pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe": Phase="Pending", Reason="", readiness=false. 
Elapsed: 13.536626ms - Jul 29 16:49:12.220: INFO: The phase of Pod pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:49:14.227: INFO: Pod "pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe": Phase="Running", Reason="", readiness=true. Elapsed: 2.021268467s - Jul 29 16:49:14.227: INFO: The phase of Pod pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe is Running (Ready = true) - Jul 29 16:49:14.227: INFO: Pod "pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe" satisfied condition "running and ready" - Jul 29 16:49:14.242: INFO: Pod pod-hostip-f23910b1-0e19-4fec-8907-31e6f620c0fe has hostIP: 192.168.121.141 - [AfterEach] [sig-node] Pods + [It] should allow substituting values in a volume subpath [Conformance] + test/e2e/common/node/expansion.go:112 + STEP: Creating a pod to test substitution in volume subpath 08/24/23 12:50:11.426 + Aug 24 12:50:11.464: INFO: Waiting up to 5m0s for pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9" in namespace "var-expansion-7129" to be "Succeeded or Failed" + Aug 24 12:50:11.470: INFO: Pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9": Phase="Pending", Reason="", readiness=false. Elapsed: 5.509366ms + Aug 24 12:50:13.478: INFO: Pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013530736s + Aug 24 12:50:15.478: INFO: Pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013468717s + STEP: Saw pod success 08/24/23 12:50:15.478 + Aug 24 12:50:15.479: INFO: Pod "var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9" satisfied condition "Succeeded or Failed" + Aug 24 12:50:15.484: INFO: Trying to get logs from node pe9deep4seen-3 pod var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9 container dapi-container: + STEP: delete the pod 08/24/23 12:50:15.494 + Aug 24 12:50:15.516: INFO: Waiting for pod var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9 to disappear + Aug 24 12:50:15.522: INFO: Pod var-expansion-c4ab10ce-5b03-498b-97bb-76b23568d9e9 no longer exists + [AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 - Jul 29 16:49:14.242: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods + Aug 24 12:50:15.522: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 - STEP: Destroying namespace "pods-3162" for this suite. 07/29/23 16:49:14.251 + STEP: Destroying namespace "var-expansion-7129" for this suite. 
08/24/23 12:50:15.531 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSS +SSSSSSSSSSSS ------------------------------ -[sig-storage] Downward API volume +[sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:249 -[BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/projected_downwardapi.go:249 +[BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:49:14.267 -Jul 29 16:49:14.267: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:49:14.27 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:14.325 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:14.349 -[BeforeEach] [sig-storage] Downward API volume +STEP: Creating a kubernetes client 08/24/23 12:50:15.549 +Aug 24 12:50:15.549: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:50:15.552 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:15.581 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:15.586 +[BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 +[BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 [It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:249 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:49:14.368 -Jul 29 16:49:14.392: INFO: Waiting up to 5m0s for pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91" in namespace "downward-api-5588" to be "Succeeded or Failed" -Jul 29 16:49:14.410: INFO: Pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91": Phase="Pending", Reason="", readiness=false. Elapsed: 18.102102ms -Jul 29 16:49:16.422: INFO: Pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91": Phase="Pending", Reason="", readiness=false. Elapsed: 2.030176948s -Jul 29 16:49:18.422: INFO: Pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.029723298s -STEP: Saw pod success 07/29/23 16:49:18.422 -Jul 29 16:49:18.423: INFO: Pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91" satisfied condition "Succeeded or Failed" -Jul 29 16:49:18.429: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91 container client-container: -STEP: delete the pod 07/29/23 16:49:18.46 -Jul 29 16:49:18.484: INFO: Waiting for pod downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91 to disappear -Jul 29 16:49:18.489: INFO: Pod downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91 no longer exists -[AfterEach] [sig-storage] Downward API volume + test/e2e/common/storage/projected_downwardapi.go:249 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:50:15.592 +Aug 24 12:50:15.610: INFO: Waiting up to 5m0s for pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6" in namespace "projected-5808" to be "Succeeded or Failed" +Aug 24 12:50:15.621: INFO: Pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6": Phase="Pending", Reason="", readiness=false. Elapsed: 10.947834ms +Aug 24 12:50:17.638: INFO: Pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027736153s +Aug 24 12:50:19.631: INFO: Pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020705116s +STEP: Saw pod success 08/24/23 12:50:19.631 +Aug 24 12:50:19.631: INFO: Pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6" satisfied condition "Succeeded or Failed" +Aug 24 12:50:19.641: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6 container client-container: +STEP: delete the pod 08/24/23 12:50:19.662 +Aug 24 12:50:19.693: INFO: Waiting for pod downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6 to disappear +Aug 24 12:50:19.702: INFO: Pod downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 -Jul 29 16:49:18.490: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume +Aug 24 12:50:19.702: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-5588" for this suite. 07/29/23 16:49:18.499 +STEP: Destroying namespace "projected-5808" for this suite. 
08/24/23 12:50:19.71 ------------------------------ -• [4.245 seconds] -[sig-storage] Downward API volume +• [4.172 seconds] +[sig-storage] Projected downwardAPI test/e2e/common/storage/framework.go:23 should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:249 + test/e2e/common/storage/projected_downwardapi.go:249 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume + [BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:49:14.267 - Jul 29 16:49:14.267: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:49:14.27 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:14.325 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:14.349 - [BeforeEach] [sig-storage] Downward API volume + STEP: Creating a kubernetes client 08/24/23 12:50:15.549 + Aug 24 12:50:15.549: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:50:15.552 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:15.581 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:15.586 + [BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 + [BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 [It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:249 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:49:14.368 - Jul 29 16:49:14.392: INFO: Waiting up to 5m0s for pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91" in namespace "downward-api-5588" to be "Succeeded or Failed" - Jul 29 16:49:14.410: INFO: Pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91": Phase="Pending", Reason="", readiness=false. Elapsed: 18.102102ms - Jul 29 16:49:16.422: INFO: Pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91": Phase="Pending", Reason="", readiness=false. Elapsed: 2.030176948s - Jul 29 16:49:18.422: INFO: Pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.029723298s - STEP: Saw pod success 07/29/23 16:49:18.422 - Jul 29 16:49:18.423: INFO: Pod "downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91" satisfied condition "Succeeded or Failed" - Jul 29 16:49:18.429: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91 container client-container: - STEP: delete the pod 07/29/23 16:49:18.46 - Jul 29 16:49:18.484: INFO: Waiting for pod downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91 to disappear - Jul 29 16:49:18.489: INFO: Pod downwardapi-volume-18101355-8e2d-4634-8ddf-508b72ddad91 no longer exists - [AfterEach] [sig-storage] Downward API volume + test/e2e/common/storage/projected_downwardapi.go:249 + STEP: Creating a pod to test downward API volume plugin 08/24/23 12:50:15.592 + Aug 24 12:50:15.610: INFO: Waiting up to 5m0s for pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6" in namespace "projected-5808" to be "Succeeded or Failed" + Aug 24 12:50:15.621: INFO: Pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6": Phase="Pending", Reason="", readiness=false. Elapsed: 10.947834ms + Aug 24 12:50:17.638: INFO: Pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027736153s + Aug 24 12:50:19.631: INFO: Pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020705116s + STEP: Saw pod success 08/24/23 12:50:19.631 + Aug 24 12:50:19.631: INFO: Pod "downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6" satisfied condition "Succeeded or Failed" + Aug 24 12:50:19.641: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6 container client-container: + STEP: delete the pod 08/24/23 12:50:19.662 + Aug 24 12:50:19.693: INFO: Waiting for pod downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6 to disappear + Aug 24 12:50:19.702: INFO: Pod downwardapi-volume-79844474-adb7-4047-b549-2664d695e5d6 no longer exists + [AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 - Jul 29 16:49:18.490: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume + Aug 24 12:50:19.702: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-5588" for this suite. 07/29/23 16:49:18.499 + STEP: Destroying namespace "projected-5808" for this suite. 
08/24/23 12:50:19.71 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:137 -[BeforeEach] [sig-storage] EmptyDir volumes +[sig-node] Probing container + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:135 +[BeforeEach] [sig-node] Probing container set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:49:18.521 -Jul 29 16:49:18.521: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 16:49:18.523 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:18.553 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:18.557 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 12:50:19.727 +Aug 24 12:50:19.727: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-probe 08/24/23 12:50:19.729 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:19.766 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:19.775 +[BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 -[It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:137 -STEP: Creating a pod to test emptydir 0666 on tmpfs 07/29/23 16:49:18.563 -Jul 29 16:49:18.578: INFO: Waiting up to 5m0s for pod "pod-d4092621-d25e-4999-92d3-150910d714f2" in namespace "emptydir-6112" to be "Succeeded or Failed" -Jul 29 16:49:18.599: INFO: Pod "pod-d4092621-d25e-4999-92d3-150910d714f2": Phase="Pending", Reason="", readiness=false. Elapsed: 20.777344ms -Jul 29 16:49:20.612: INFO: Pod "pod-d4092621-d25e-4999-92d3-150910d714f2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.033716846s -Jul 29 16:49:22.609: INFO: Pod "pod-d4092621-d25e-4999-92d3-150910d714f2": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.030214112s -STEP: Saw pod success 07/29/23 16:49:22.609 -Jul 29 16:49:22.613: INFO: Pod "pod-d4092621-d25e-4999-92d3-150910d714f2" satisfied condition "Succeeded or Failed" -Jul 29 16:49:22.620: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-d4092621-d25e-4999-92d3-150910d714f2 container test-container: -STEP: delete the pod 07/29/23 16:49:22.636 -Jul 29 16:49:22.671: INFO: Waiting for pod pod-d4092621-d25e-4999-92d3-150910d714f2 to disappear -Jul 29 16:49:22.680: INFO: Pod pod-d4092621-d25e-4999-92d3-150910d714f2 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 +[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:135 +STEP: Creating pod busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0 in namespace container-probe-7226 08/24/23 12:50:19.78 +Aug 24 12:50:19.794: INFO: Waiting up to 5m0s for pod "busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0" in namespace "container-probe-7226" to be "not pending" +Aug 24 12:50:19.799: INFO: Pod "busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0": Phase="Pending", Reason="", readiness=false. Elapsed: 5.174102ms +Aug 24 12:50:21.808: INFO: Pod "busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0": Phase="Running", Reason="", readiness=true. Elapsed: 2.013454541s +Aug 24 12:50:21.808: INFO: Pod "busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0" satisfied condition "not pending" +Aug 24 12:50:21.808: INFO: Started pod busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0 in namespace container-probe-7226 +STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 12:50:21.808 +Aug 24 12:50:21.813: INFO: Initial restart count of pod busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0 is 0 +Aug 24 12:51:12.052: INFO: Restart count of pod container-probe-7226/busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0 is now 1 (50.23850926s elapsed) +STEP: deleting the pod 08/24/23 12:51:12.052 +[AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 -Jul 29 16:49:22.681: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +Aug 24 12:51:12.081: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-6112" for this suite. 07/29/23 16:49:22.705 +STEP: Destroying namespace "container-probe-7226" for this suite. 
08/24/23 12:51:12.09 ------------------------------ -• [4.199 seconds] -[sig-storage] EmptyDir volumes -test/e2e/common/storage/framework.go:23 - should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:137 +• [SLOW TEST] [52.387 seconds] +[sig-node] Probing container +test/e2e/common/node/framework.go:23 + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:135 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-node] Probing container set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:49:18.521 - Jul 29 16:49:18.521: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 16:49:18.523 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:18.553 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:18.557 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 12:50:19.727 + Aug 24 12:50:19.727: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-probe 08/24/23 12:50:19.729 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:50:19.766 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:50:19.775 + [BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 - [It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:137 - STEP: Creating a pod to test emptydir 0666 on tmpfs 07/29/23 16:49:18.563 - Jul 29 16:49:18.578: INFO: Waiting up to 5m0s for pod "pod-d4092621-d25e-4999-92d3-150910d714f2" in namespace "emptydir-6112" to be "Succeeded or Failed" - Jul 29 16:49:18.599: INFO: Pod "pod-d4092621-d25e-4999-92d3-150910d714f2": Phase="Pending", Reason="", readiness=false. Elapsed: 20.777344ms - Jul 29 16:49:20.612: INFO: Pod "pod-d4092621-d25e-4999-92d3-150910d714f2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.033716846s - Jul 29 16:49:22.609: INFO: Pod "pod-d4092621-d25e-4999-92d3-150910d714f2": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.030214112s - STEP: Saw pod success 07/29/23 16:49:22.609 - Jul 29 16:49:22.613: INFO: Pod "pod-d4092621-d25e-4999-92d3-150910d714f2" satisfied condition "Succeeded or Failed" - Jul 29 16:49:22.620: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-d4092621-d25e-4999-92d3-150910d714f2 container test-container: - STEP: delete the pod 07/29/23 16:49:22.636 - Jul 29 16:49:22.671: INFO: Waiting for pod pod-d4092621-d25e-4999-92d3-150910d714f2 to disappear - Jul 29 16:49:22.680: INFO: Pod pod-d4092621-d25e-4999-92d3-150910d714f2 no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 + [It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:135 + STEP: Creating pod busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0 in namespace container-probe-7226 08/24/23 12:50:19.78 + Aug 24 12:50:19.794: INFO: Waiting up to 5m0s for pod "busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0" in namespace "container-probe-7226" to be "not pending" + Aug 24 12:50:19.799: INFO: Pod "busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0": Phase="Pending", Reason="", readiness=false. Elapsed: 5.174102ms + Aug 24 12:50:21.808: INFO: Pod "busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0": Phase="Running", Reason="", readiness=true. Elapsed: 2.013454541s + Aug 24 12:50:21.808: INFO: Pod "busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0" satisfied condition "not pending" + Aug 24 12:50:21.808: INFO: Started pod busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0 in namespace container-probe-7226 + STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 12:50:21.808 + Aug 24 12:50:21.813: INFO: Initial restart count of pod busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0 is 0 + Aug 24 12:51:12.052: INFO: Restart count of pod container-probe-7226/busybox-32ad0cdb-b3d9-4666-96b5-8ce77e6a14f0 is now 1 (50.23850926s elapsed) + STEP: deleting the pod 08/24/23 12:51:12.052 + [AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 - Jul 29 16:49:22.681: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 12:51:12.081: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-6112" for this suite. 07/29/23 16:49:22.705 + STEP: Destroying namespace "container-probe-7226" for this suite. 
08/24/23 12:51:12.09
 << End Captured GinkgoWriter Output
 ------------------------------
-SSS
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
 ------------------------------
-[sig-network] Services
- should be able to change the type from ClusterIP to ExternalName [Conformance]
- test/e2e/network/service.go:1515
-[BeforeEach] [sig-network] Services
+[sig-apps] Job
+ should adopt matching orphans and release non-matching pods [Conformance]
+ test/e2e/apps/job.go:507
+[BeforeEach] [sig-apps] Job
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:49:22.723
-Jul 29 16:49:22.723: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename services 07/29/23 16:49:22.726
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:22.762
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:22.767
-[BeforeEach] [sig-network] Services
+STEP: Creating a kubernetes client 08/24/23 12:51:12.119
+Aug 24 12:51:12.119: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename job 08/24/23 12:51:12.122
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:12.155
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:12.16
+[BeforeEach] [sig-apps] Job
 test/e2e/framework/metrics/init/init.go:31
-[BeforeEach] [sig-network] Services
- test/e2e/network/service.go:766
-[It] should be able to change the type from ClusterIP to ExternalName [Conformance]
- test/e2e/network/service.go:1515
-STEP: creating a service clusterip-service with the type=ClusterIP in namespace services-9942 07/29/23 16:49:22.772
-STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service 07/29/23 16:49:22.8
-STEP: creating service externalsvc in namespace services-9942 07/29/23 16:49:22.801
-STEP: creating replication controller externalsvc in namespace services-9942 07/29/23 16:49:22.837
-I0729 16:49:22.885775 13 runners.go:193] Created replication controller with name: externalsvc, namespace: services-9942, replica count: 2
-I0729 16:49:25.937263 13 runners.go:193] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
-STEP: changing the ClusterIP service to type=ExternalName 07/29/23 16:49:25.943
-Jul 29 16:49:25.983: INFO: Creating new exec pod
-Jul 29 16:49:26.004: INFO: Waiting up to 5m0s for pod "execpod2w25x" in namespace "services-9942" to be "running"
-Jul 29 16:49:26.020: INFO: Pod "execpod2w25x": Phase="Pending", Reason="", readiness=false. Elapsed: 15.89274ms
-Jul 29 16:49:28.033: INFO: Pod "execpod2w25x": Phase="Running", Reason="", readiness=true. Elapsed: 2.028515122s
-Jul 29 16:49:28.033: INFO: Pod "execpod2w25x" satisfied condition "running"
-Jul 29 16:49:28.033: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9942 exec execpod2w25x -- /bin/sh -x -c nslookup clusterip-service.services-9942.svc.cluster.local'
-Jul 29 16:49:28.474: INFO: stderr: "+ nslookup clusterip-service.services-9942.svc.cluster.local\n"
-Jul 29 16:49:28.474: INFO: stdout: "Server:\t\t10.233.0.10\nAddress:\t10.233.0.10#53\n\nclusterip-service.services-9942.svc.cluster.local\tcanonical name = externalsvc.services-9942.svc.cluster.local.\nName:\texternalsvc.services-9942.svc.cluster.local\nAddress: 10.233.44.44\n\n"
-STEP: deleting ReplicationController externalsvc in namespace services-9942, will wait for the garbage collector to delete the pods 07/29/23 16:49:28.474
-Jul 29 16:49:28.557: INFO: Deleting ReplicationController externalsvc took: 16.19231ms
-Jul 29 16:49:28.658: INFO: Terminating ReplicationController externalsvc pods took: 100.792473ms
-Jul 29 16:49:31.085: INFO: Cleaning up the ClusterIP to ExternalName test service
-[AfterEach] [sig-network] Services
+[It] should adopt matching orphans and release non-matching pods [Conformance]
+ test/e2e/apps/job.go:507
+STEP: Creating a job 08/24/23 12:51:12.164
+STEP: Ensuring active pods == parallelism 08/24/23 12:51:12.181
+STEP: Orphaning one of the Job's Pods 08/24/23 12:51:14.192
+Aug 24 12:51:14.722: INFO: Successfully updated pod "adopt-release-pqfn5"
+STEP: Checking that the Job readopts the Pod 08/24/23 12:51:14.722
+Aug 24 12:51:14.723: INFO: Waiting up to 15m0s for pod "adopt-release-pqfn5" in namespace "job-1788" to be "adopted"
+Aug 24 12:51:14.728: INFO: Pod "adopt-release-pqfn5": Phase="Running", Reason="", readiness=true. Elapsed: 5.282528ms
+Aug 24 12:51:16.737: INFO: Pod "adopt-release-pqfn5": Phase="Running", Reason="", readiness=true. Elapsed: 2.014503649s
+Aug 24 12:51:16.737: INFO: Pod "adopt-release-pqfn5" satisfied condition "adopted"
+STEP: Removing the labels from the Job's Pod 08/24/23 12:51:16.737
+Aug 24 12:51:17.268: INFO: Successfully updated pod "adopt-release-pqfn5"
+STEP: Checking that the Job releases the Pod 08/24/23 12:51:17.268
+Aug 24 12:51:17.268: INFO: Waiting up to 15m0s for pod "adopt-release-pqfn5" in namespace "job-1788" to be "released"
+Aug 24 12:51:17.292: INFO: Pod "adopt-release-pqfn5": Phase="Running", Reason="", readiness=true. Elapsed: 23.614887ms
+Aug 24 12:51:19.301: INFO: Pod "adopt-release-pqfn5": Phase="Running", Reason="", readiness=true. Elapsed: 2.032448772s
+Aug 24 12:51:19.301: INFO: Pod "adopt-release-pqfn5" satisfied condition "released"
+[AfterEach] [sig-apps] Job
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:49:31.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-network] Services
+Aug 24 12:51:19.302: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-apps] Job
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-network] Services
+[DeferCleanup (Each)] [sig-apps] Job
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-network] Services
+[DeferCleanup (Each)] [sig-apps] Job
 tear down framework | framework.go:193
-STEP: Destroying namespace "services-9942" for this suite. 07/29/23 16:49:31.134
+STEP: Destroying namespace "job-1788" for this suite. 08/24/23 12:51:19.312
 ------------------------------
-• [SLOW TEST] [8.430 seconds]
-[sig-network] Services
-test/e2e/network/common/framework.go:23
- should be able to change the type from ClusterIP to ExternalName [Conformance]
- test/e2e/network/service.go:1515
+• [SLOW TEST] [7.207 seconds]
+[sig-apps] Job
+test/e2e/apps/framework.go:23
+ should adopt matching orphans and release non-matching pods [Conformance]
+ test/e2e/apps/job.go:507
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-network] Services
+ [BeforeEach] [sig-apps] Job
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:49:22.723
- Jul 29 16:49:22.723: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename services 07/29/23 16:49:22.726
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:22.762
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:22.767
- [BeforeEach] [sig-network] Services
+ STEP: Creating a kubernetes client 08/24/23 12:51:12.119
+ Aug 24 12:51:12.119: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename job 08/24/23 12:51:12.122
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:12.155
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:12.16
+ [BeforeEach] [sig-apps] Job
 test/e2e/framework/metrics/init/init.go:31
- [BeforeEach] [sig-network] Services
- test/e2e/network/service.go:766
- [It] should be able to change the type from ClusterIP to ExternalName [Conformance]
- test/e2e/network/service.go:1515
- STEP: creating a service clusterip-service with the type=ClusterIP in namespace services-9942 07/29/23 16:49:22.772
- STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service 07/29/23 16:49:22.8
- STEP: creating service externalsvc in namespace services-9942 07/29/23 16:49:22.801
- STEP: creating replication controller externalsvc in namespace services-9942 07/29/23 16:49:22.837
- I0729 16:49:22.885775 13 runners.go:193] Created replication controller with name: externalsvc, namespace: services-9942, replica count: 2
- I0729 16:49:25.937263 13 runners.go:193] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
- STEP: changing the ClusterIP service to type=ExternalName 07/29/23 16:49:25.943
- Jul 29 16:49:25.983: INFO: Creating new exec pod
- Jul 29 16:49:26.004: INFO: Waiting up to 5m0s for pod "execpod2w25x" in namespace "services-9942" to be "running"
- Jul 29 16:49:26.020: INFO: Pod "execpod2w25x": Phase="Pending", Reason="", readiness=false. Elapsed: 15.89274ms
- Jul 29 16:49:28.033: INFO: Pod "execpod2w25x": Phase="Running", Reason="", readiness=true. Elapsed: 2.028515122s
- Jul 29 16:49:28.033: INFO: Pod "execpod2w25x" satisfied condition "running"
- Jul 29 16:49:28.033: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-9942 exec execpod2w25x -- /bin/sh -x -c nslookup clusterip-service.services-9942.svc.cluster.local'
- Jul 29 16:49:28.474: INFO: stderr: "+ nslookup clusterip-service.services-9942.svc.cluster.local\n"
- Jul 29 16:49:28.474: INFO: stdout: "Server:\t\t10.233.0.10\nAddress:\t10.233.0.10#53\n\nclusterip-service.services-9942.svc.cluster.local\tcanonical name = externalsvc.services-9942.svc.cluster.local.\nName:\texternalsvc.services-9942.svc.cluster.local\nAddress: 10.233.44.44\n\n"
- STEP: deleting ReplicationController externalsvc in namespace services-9942, will wait for the garbage collector to delete the pods 07/29/23 16:49:28.474
- Jul 29 16:49:28.557: INFO: Deleting ReplicationController externalsvc took: 16.19231ms
- Jul 29 16:49:28.658: INFO: Terminating ReplicationController externalsvc pods took: 100.792473ms
- Jul 29 16:49:31.085: INFO: Cleaning up the ClusterIP to ExternalName test service
- [AfterEach] [sig-network] Services
+ [It] should adopt matching orphans and release non-matching pods [Conformance]
+ test/e2e/apps/job.go:507
+ STEP: Creating a job 08/24/23 12:51:12.164
+ STEP: Ensuring active pods == parallelism 08/24/23 12:51:12.181
+ STEP: Orphaning one of the Job's Pods 08/24/23 12:51:14.192
+ Aug 24 12:51:14.722: INFO: Successfully updated pod "adopt-release-pqfn5"
+ STEP: Checking that the Job readopts the Pod 08/24/23 12:51:14.722
+ Aug 24 12:51:14.723: INFO: Waiting up to 15m0s for pod "adopt-release-pqfn5" in namespace "job-1788" to be "adopted"
+ Aug 24 12:51:14.728: INFO: Pod "adopt-release-pqfn5": Phase="Running", Reason="", readiness=true. Elapsed: 5.282528ms
+ Aug 24 12:51:16.737: INFO: Pod "adopt-release-pqfn5": Phase="Running", Reason="", readiness=true. Elapsed: 2.014503649s
+ Aug 24 12:51:16.737: INFO: Pod "adopt-release-pqfn5" satisfied condition "adopted"
+ STEP: Removing the labels from the Job's Pod 08/24/23 12:51:16.737
+ Aug 24 12:51:17.268: INFO: Successfully updated pod "adopt-release-pqfn5"
+ STEP: Checking that the Job releases the Pod 08/24/23 12:51:17.268
+ Aug 24 12:51:17.268: INFO: Waiting up to 15m0s for pod "adopt-release-pqfn5" in namespace "job-1788" to be "released"
+ Aug 24 12:51:17.292: INFO: Pod "adopt-release-pqfn5": Phase="Running", Reason="", readiness=true. Elapsed: 23.614887ms
+ Aug 24 12:51:19.301: INFO: Pod "adopt-release-pqfn5": Phase="Running", Reason="", readiness=true. Elapsed: 2.032448772s
+ Aug 24 12:51:19.301: INFO: Pod "adopt-release-pqfn5" satisfied condition "released"
+ [AfterEach] [sig-apps] Job
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:49:31.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-network] Services
+ Aug 24 12:51:19.302: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-apps] Job
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-network] Services
+ [DeferCleanup (Each)] [sig-apps] Job
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-network] Services
+ [DeferCleanup (Each)] [sig-apps] Job
 tear down framework | framework.go:193
- STEP: Destroying namespace "services-9942" for this suite. 07/29/23 16:49:31.134
+ STEP: Destroying namespace "job-1788" for this suite. 08/24/23 12:51:19.312
 << End Captured GinkgoWriter Output
 ------------------------------
-SSSSSSSSS
+SSSSSSSS
 ------------------------------
-[sig-network] DNS
- should provide DNS for services [Conformance]
- test/e2e/network/dns.go:137
-[BeforeEach] [sig-network] DNS
+[sig-storage] Projected downwardAPI
+ should provide container's cpu limit [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_downwardapi.go:193
+[BeforeEach] [sig-storage] Projected downwardAPI
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:49:31.156
-Jul 29 16:49:31.156: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename dns 07/29/23 16:49:31.158
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:31.186
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:31.191
-[BeforeEach] [sig-network] DNS
+STEP: Creating a kubernetes client 08/24/23 12:51:19.33
+Aug 24 12:51:19.330: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename projected 08/24/23 12:51:19.333
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:19.359
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:19.365
+[BeforeEach] [sig-storage] Projected downwardAPI
 test/e2e/framework/metrics/init/init.go:31
-[It] should provide DNS for services [Conformance]
- test/e2e/network/dns.go:137
-STEP: Creating a test headless service 07/29/23 16:49:31.197
-STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8931.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8931.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search 64.13.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.13.64_udp@PTR;check="$$(dig +tcp +noall +answer +search 64.13.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.13.64_tcp@PTR;sleep 1; done
- 07/29/23 16:49:31.226
-STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8931.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8931.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search 64.13.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.13.64_udp@PTR;check="$$(dig +tcp +noall +answer +search 64.13.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.13.64_tcp@PTR;sleep 1; done
- 07/29/23 16:49:31.226
-STEP: creating a pod to probe DNS 07/29/23 16:49:31.227
-STEP: submitting the pod to kubernetes 07/29/23 16:49:31.227
-Jul 29 16:49:31.251: INFO: Waiting up to 15m0s for pod "dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03" in namespace "dns-8931" to be "running"
-Jul 29 16:49:31.258: INFO: Pod "dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03": Phase="Pending", Reason="", readiness=false. Elapsed: 7.898979ms
-Jul 29 16:49:33.270: INFO: Pod "dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03": Phase="Running", Reason="", readiness=true. Elapsed: 2.019561466s
-Jul 29 16:49:33.270: INFO: Pod "dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03" satisfied condition "running"
-STEP: retrieving the pod 07/29/23 16:49:33.27
-STEP: looking for the results for each expected name from probers 07/29/23 16:49:33.279
-Jul 29 16:49:33.294: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:33.305: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:33.318: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:33.331: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:33.382: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:33.391: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:33.401: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:33.410: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:33.442: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
-Jul 29 16:49:38.453: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:38.463: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:38.470: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:38.478: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:38.514: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:38.521: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:38.529: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:38.541: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:38.572: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
-Jul 29 16:49:43.452: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:43.461: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:43.467: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:43.475: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:43.516: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:43.531: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:43.544: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:43.554: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:43.589: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
-Jul 29 16:49:48.459: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:48.468: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:48.476: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:48.485: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:48.528: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:48.537: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:48.547: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:48.554: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:48.591: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
-Jul 29 16:49:53.454: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:53.461: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:53.468: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:53.475: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:53.514: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:53.523: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:53.530: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:53.538: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:53.566: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
-Jul 29 16:49:58.451: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:58.459: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:58.466: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:58.473: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:58.510: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:58.517: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:58.525: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:58.533: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
-Jul 29 16:49:58.562: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
-Jul 29 16:50:03.565: INFO: DNS probes using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 succeeded
-
-STEP: deleting the pod 07/29/23 16:50:03.565
-STEP: deleting the test service 07/29/23 16:50:03.585
-STEP: deleting the test headless service 07/29/23 16:50:03.719
-[AfterEach] [sig-network] DNS
+[BeforeEach] [sig-storage] Projected downwardAPI
+ test/e2e/common/storage/projected_downwardapi.go:44
+[It] should provide container's cpu limit [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_downwardapi.go:193
+STEP: Creating a pod to test downward API volume plugin 08/24/23 12:51:19.371
+Aug 24 12:51:19.390: INFO: Waiting up to 5m0s for pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa" in namespace "projected-6860" to be "Succeeded or Failed"
+Aug 24 12:51:19.396: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa": Phase="Pending", Reason="", readiness=false. Elapsed: 5.698434ms
+Aug 24 12:51:21.406: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa": Phase="Running", Reason="", readiness=true. Elapsed: 2.015717371s
+Aug 24 12:51:23.432: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa": Phase="Running", Reason="", readiness=false. Elapsed: 4.042095887s
+Aug 24 12:51:25.407: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.016760615s
+STEP: Saw pod success 08/24/23 12:51:25.408
+Aug 24 12:51:25.409: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa" satisfied condition "Succeeded or Failed"
+Aug 24 12:51:25.417: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa container client-container:
+STEP: delete the pod 08/24/23 12:51:25.433
+Aug 24 12:51:25.455: INFO: Waiting for pod downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa to disappear
+Aug 24 12:51:25.460: INFO: Pod downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
 test/e2e/framework/node/init/init.go:32
-Jul 29 16:50:03.782: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-network] DNS
+Aug 24 12:51:25.461: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-network] DNS
+[DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-network] DNS
+[DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 tear down framework | framework.go:193
-STEP: Destroying namespace "dns-8931" for this suite. 07/29/23 16:50:03.799
+STEP: Destroying namespace "projected-6860" for this suite. 08/24/23 12:51:25.47
 ------------------------------
-• [SLOW TEST] [32.660 seconds]
-[sig-network] DNS
-test/e2e/network/common/framework.go:23
- should provide DNS for services [Conformance]
- test/e2e/network/dns.go:137
+• [SLOW TEST] [6.153 seconds]
+[sig-storage] Projected downwardAPI
+test/e2e/common/storage/framework.go:23
+ should provide container's cpu limit [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_downwardapi.go:193
 Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-network] DNS
+ [BeforeEach] [sig-storage] Projected downwardAPI
 set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:49:31.156
- Jul 29 16:49:31.156: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename dns 07/29/23 16:49:31.158
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:49:31.186
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:49:31.191
- [BeforeEach] [sig-network] DNS
+ STEP: Creating a kubernetes client 08/24/23 12:51:19.33
+ Aug 24 12:51:19.330: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename projected 08/24/23 12:51:19.333
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:19.359
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:19.365
+ [BeforeEach] [sig-storage] Projected downwardAPI
 test/e2e/framework/metrics/init/init.go:31
- [It] should provide DNS for services [Conformance]
- test/e2e/network/dns.go:137
- STEP: Creating a test headless service 07/29/23 16:49:31.197
- STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8931.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8931.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search 64.13.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.13.64_udp@PTR;check="$$(dig +tcp +noall +answer +search 64.13.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.13.64_tcp@PTR;sleep 1; done
- 07/29/23 16:49:31.226
- STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8931.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8931.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-8931.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-8931.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-8931.svc.cluster.local;check="$$(dig +notcp +noall +answer +search 64.13.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.13.64_udp@PTR;check="$$(dig +tcp +noall +answer +search 64.13.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.13.64_tcp@PTR;sleep 1; done
- 07/29/23 16:49:31.226
- STEP: creating a pod to probe DNS 07/29/23 16:49:31.227
- STEP: submitting the pod to kubernetes 07/29/23 16:49:31.227
- Jul 29 16:49:31.251: INFO: Waiting up to 15m0s for pod "dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03" in namespace "dns-8931" to be "running"
- Jul 29 16:49:31.258: INFO: Pod "dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03": Phase="Pending", Reason="", readiness=false. Elapsed: 7.898979ms
- Jul 29 16:49:33.270: INFO: Pod "dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03": Phase="Running", Reason="", readiness=true. Elapsed: 2.019561466s
- Jul 29 16:49:33.270: INFO: Pod "dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03" satisfied condition "running"
- STEP: retrieving the pod 07/29/23 16:49:33.27
- STEP: looking for the results for each expected name from probers 07/29/23 16:49:33.279
- Jul 29 16:49:33.294: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:33.305: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:33.318: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:33.331: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:33.382: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:33.391: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:33.401: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:33.410: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:33.442: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
- Jul 29 16:49:38.453: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:38.463: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:38.470: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:38.478: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:38.514: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:38.521: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:38.529: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:38.541: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:38.572: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
- Jul 29 16:49:43.452: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:43.461: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:43.467: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:43.475: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:43.516: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:43.531: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:43.544: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:43.554: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:43.589: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
- Jul 29 16:49:48.459: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:48.468: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:48.476: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:48.485: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:48.528: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:48.537: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:48.547: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:48.554: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:48.591: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
- Jul 29 16:49:53.454: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:53.461: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:53.468: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:53.475: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:53.514: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:53.523: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:53.530: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:53.538: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:53.566: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
- Jul 29 16:49:58.451: INFO: Unable to read wheezy_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:58.459: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:58.466: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:58.473: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:58.510: INFO: Unable to read jessie_udp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:58.517: INFO: Unable to read jessie_tcp@dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:58.525: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:58.533: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local from pod dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03: the server could not find the requested resource (get pods dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03)
- Jul 29 16:49:58.562: INFO: Lookups using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 failed for: [wheezy_udp@dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@dns-test-service.dns-8931.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_udp@dns-test-service.dns-8931.svc.cluster.local jessie_tcp@dns-test-service.dns-8931.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8931.svc.cluster.local]
-
- Jul 29 16:50:03.565: INFO: DNS probes using dns-8931/dns-test-d39f95e1-8103-45c8-9e5b-e2c6129b2b03 succeeded
-
- STEP: deleting the pod 07/29/23 16:50:03.565
- STEP: deleting the test service 07/29/23 16:50:03.585
- STEP: deleting the test headless service 07/29/23 16:50:03.719
- [AfterEach] [sig-network] DNS
+ [BeforeEach] [sig-storage] Projected downwardAPI
+ test/e2e/common/storage/projected_downwardapi.go:44
+ [It] should provide container's cpu limit [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_downwardapi.go:193
+ STEP: Creating a pod to test downward API volume plugin 08/24/23 12:51:19.371
+ Aug 24 12:51:19.390: INFO: Waiting up to 5m0s for pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa" in namespace "projected-6860" to be "Succeeded or Failed"
+ Aug 24 12:51:19.396: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa": Phase="Pending", Reason="", readiness=false. Elapsed: 5.698434ms
+ Aug 24 12:51:21.406: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa": Phase="Running", Reason="", readiness=true. Elapsed: 2.015717371s
+ Aug 24 12:51:23.432: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa": Phase="Running", Reason="", readiness=false. Elapsed: 4.042095887s
+ Aug 24 12:51:25.407: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.016760615s
+ STEP: Saw pod success 08/24/23 12:51:25.408
+ Aug 24 12:51:25.409: INFO: Pod "downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa" satisfied condition "Succeeded or Failed"
+ Aug 24 12:51:25.417: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa container client-container:
+ STEP: delete the pod 08/24/23 12:51:25.433
+ Aug 24 12:51:25.455: INFO: Waiting for pod downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa to disappear
+ Aug 24 12:51:25.460: INFO: Pod downwardapi-volume-ace5bc77-fdfb-4ed1-b1af-c85cf858f0aa no longer exists
+ [AfterEach] [sig-storage] Projected downwardAPI
 test/e2e/framework/node/init/init.go:32
- Jul 29 16:50:03.782: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-network] DNS
+ Aug 24 12:51:25.461: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-network] DNS
+ [DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-network] DNS
+ [DeferCleanup (Each)] [sig-storage] Projected downwardAPI
 tear down framework | framework.go:193
- STEP: Destroying namespace "dns-8931" for this suite. 07/29/23 16:50:03.799
+ STEP: Destroying namespace "projected-6860" for this suite. 08/24/23 12:51:25.47
 << End Captured GinkgoWriter Output
 ------------------------------
-S
+SSSSSSSSSSSSSSSSSSSSS
 ------------------------------
 [sig-network] DNS
- should provide /etc/hosts entries for the cluster [Conformance]
- test/e2e/network/dns.go:117
+ should support configurable pod DNS nameservers [Conformance]
+ test/e2e/network/dns.go:411
 [BeforeEach] [sig-network] DNS
 set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 16:50:03.819
-Jul 29 16:50:03.819: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename dns 07/29/23 16:50:03.821
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:03.879
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:03.886
+STEP: Creating a kubernetes client 08/24/23 12:51:25.485
+Aug 24 12:51:25.485: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename dns 08/24/23 12:51:25.488
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:25.514
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:25.52
 [BeforeEach] [sig-network] DNS
 test/e2e/framework/metrics/init/init.go:31
-[It] should provide /etc/hosts entries for the cluster [Conformance]
- test/e2e/network/dns.go:117
-STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-3640.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-3640.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;sleep 1; done
- 07/29/23 16:50:03.944
-STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-3640.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-3640.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;sleep 1; done
- 07/29/23 16:50:03.944
-STEP: creating a pod to probe /etc/hosts 07/29/23 16:50:03.944
-STEP: submitting the pod to kubernetes 07/29/23 16:50:03.945
-Jul 29 16:50:03.969: INFO: Waiting up to 15m0s for pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4" in namespace "dns-3640" to be "running"
-Jul 29 16:50:03.978: INFO: Pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4": Phase="Pending", Reason="", readiness=false. Elapsed: 8.648896ms
-Jul 29 16:50:05.989: INFO: Pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020057542s
-Jul 29 16:50:07.987: INFO: Pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4": Phase="Running", Reason="", readiness=true. Elapsed: 4.017461665s
-Jul 29 16:50:07.987: INFO: Pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4" satisfied condition "running"
-STEP: retrieving the pod 07/29/23 16:50:07.987
-STEP: looking for the results for each expected name from probers 07/29/23 16:50:07.995
-Jul 29 16:50:08.032: INFO: DNS probes using dns-3640/dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4 succeeded
-
-STEP: deleting the pod 07/29/23 16:50:08.032
+[It] should support configurable pod DNS nameservers [Conformance]
+ test/e2e/network/dns.go:411
+STEP: Creating a pod with dnsPolicy=None and customized dnsConfig... 08/24/23 12:51:25.526
+Aug 24 12:51:25.544: INFO: Created pod &Pod{ObjectMeta:{test-dns-nameservers dns-1113 f2fc250a-0446-42df-8fe9-153df8cce098 29323 0 2023-08-24 12:51:25 +0000 UTC map[] map[] [] [] [{e2e.test Update v1 2023-08-24 12:51:25 +0000 UTC FieldsV1 {"f:spec":{"f:containers":{"k:{\"name\":\"agnhost-container\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsConfig":{".":{},"f:nameservers":{},"f:searches":{}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-s5pm2,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost-container,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[pause],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s5pm2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:None,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountS
erviceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:&PodDNSConfig{Nameservers:[1.1.1.1],Searches:[resolv.conf.local],Options:[]PodDNSConfigOption{},},ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:51:25.546: INFO: Waiting up to 5m0s for pod "test-dns-nameservers" in namespace "dns-1113" to be "running and ready" +Aug 24 12:51:25.553: INFO: Pod "test-dns-nameservers": Phase="Pending", Reason="", readiness=false. Elapsed: 6.62375ms +Aug 24 12:51:25.553: INFO: The phase of Pod test-dns-nameservers is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:51:27.561: INFO: Pod "test-dns-nameservers": Phase="Running", Reason="", readiness=true. Elapsed: 2.014879875s +Aug 24 12:51:27.561: INFO: The phase of Pod test-dns-nameservers is Running (Ready = true) +Aug 24 12:51:27.562: INFO: Pod "test-dns-nameservers" satisfied condition "running and ready" +STEP: Verifying customized DNS suffix list is configured on pod... 08/24/23 12:51:27.562 +Aug 24 12:51:27.562: INFO: ExecWithOptions {Command:[/agnhost dns-suffix] Namespace:dns-1113 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:51:27.562: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:51:27.564: INFO: ExecWithOptions: Clientset creation +Aug 24 12:51:27.564: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/dns-1113/pods/test-dns-nameservers/exec?command=%2Fagnhost&command=dns-suffix&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) +STEP: Verifying customized DNS server is configured on pod... 08/24/23 12:51:27.715 +Aug 24 12:51:27.716: INFO: ExecWithOptions {Command:[/agnhost dns-server-list] Namespace:dns-1113 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:51:27.716: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:51:27.717: INFO: ExecWithOptions: Clientset creation +Aug 24 12:51:27.717: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/dns-1113/pods/test-dns-nameservers/exec?command=%2Fagnhost&command=dns-server-list&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) +Aug 24 12:51:27.875: INFO: Deleting pod test-dns-nameservers... 
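(For reference, the test-dns-nameservers pod created above, and dumped in full by the framework, reduces to roughly the following manifest. This is an illustrative sketch reconstructed from the logged PodSpec, not part of the captured output; defaulted and empty fields are omitted.)

    # Sketch of the pod exercised by "should support configurable pod DNS
    # nameservers", reconstructed from the PodSpec dump in the log above.
    # Illustrative only; all values are taken from the logged spec.
    apiVersion: v1
    kind: Pod
    metadata:
      name: test-dns-nameservers
      namespace: dns-1113
    spec:
      terminationGracePeriodSeconds: 0
      dnsPolicy: None                # no cluster DNS; resolv.conf is built from dnsConfig alone
      dnsConfig:
        nameservers:
          - 1.1.1.1                  # custom resolver the test expects inside the pod
        searches:
          - resolv.conf.local        # custom search suffix the test expects inside the pod
      containers:
        - name: agnhost-container
          image: registry.k8s.io/e2e-test-images/agnhost:2.43
          args: ["pause"]

(The two exec calls logged above, /agnhost dns-suffix and /agnhost dns-server-list, print the search suffixes and nameservers the pod actually sees, which the test compares against this dnsConfig.)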
[AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:08.068: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:51:27.901: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 -STEP: Destroying namespace "dns-3640" for this suite. 07/29/23 16:50:08.084 +STEP: Destroying namespace "dns-1113" for this suite. 08/24/23 12:51:27.911 ------------------------------ -• [4.292 seconds] +• [2.441 seconds] [sig-network] DNS test/e2e/network/common/framework.go:23 - should provide /etc/hosts entries for the cluster [Conformance] - test/e2e/network/dns.go:117 + should support configurable pod DNS nameservers [Conformance] + test/e2e/network/dns.go:411 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-network] DNS set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:03.819 - Jul 29 16:50:03.819: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename dns 07/29/23 16:50:03.821 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:03.879 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:03.886 + STEP: Creating a kubernetes client 08/24/23 12:51:25.485 + Aug 24 12:51:25.485: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename dns 08/24/23 12:51:25.488 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:25.514 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:25.52 [BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 - [It] should provide /etc/hosts entries for the cluster [Conformance] - test/e2e/network/dns.go:117 - STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-3640.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-3640.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;sleep 1; done - 07/29/23 16:50:03.944 - STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-3640.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-3640.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;sleep 1; done - 07/29/23 16:50:03.944 - STEP: creating a pod to probe /etc/hosts 07/29/23 16:50:03.944 - STEP: submitting the pod to kubernetes 07/29/23 16:50:03.945 - Jul 29 16:50:03.969: INFO: Waiting up to 15m0s for pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4" in namespace "dns-3640" to be "running" - Jul 29 16:50:03.978: INFO: Pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4": Phase="Pending", Reason="", readiness=false. Elapsed: 8.648896ms - Jul 29 16:50:05.989: INFO: Pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020057542s - Jul 29 16:50:07.987: INFO: Pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.017461665s - Jul 29 16:50:07.987: INFO: Pod "dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4" satisfied condition "running" - STEP: retrieving the pod 07/29/23 16:50:07.987 - STEP: looking for the results for each expected name from probers 07/29/23 16:50:07.995 - Jul 29 16:50:08.032: INFO: DNS probes using dns-3640/dns-test-fd1c7a25-b6e0-421e-a72d-9b3c0251d2c4 succeeded - - STEP: deleting the pod 07/29/23 16:50:08.032 + [It] should support configurable pod DNS nameservers [Conformance] + test/e2e/network/dns.go:411 + STEP: Creating a pod with dnsPolicy=None and customized dnsConfig... 08/24/23 12:51:25.526 + Aug 24 12:51:25.544: INFO: Created pod &Pod{ObjectMeta:{test-dns-nameservers dns-1113 f2fc250a-0446-42df-8fe9-153df8cce098 29323 0 2023-08-24 12:51:25 +0000 UTC map[] map[] [] [] [{e2e.test Update v1 2023-08-24 12:51:25 +0000 UTC FieldsV1 {"f:spec":{"f:containers":{"k:{\"name\":\"agnhost-container\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsConfig":{".":{},"f:nameservers":{},"f:searches":{}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-s5pm2,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost-container,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[pause],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s5pm2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},Restar
tPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:None,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:&PodDNSConfig{Nameservers:[1.1.1.1],Searches:[resolv.conf.local],Options:[]PodDNSConfigOption{},},ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:51:25.546: INFO: Waiting up to 5m0s for pod "test-dns-nameservers" in namespace "dns-1113" to be "running and ready" + Aug 24 12:51:25.553: INFO: Pod "test-dns-nameservers": Phase="Pending", Reason="", readiness=false. Elapsed: 6.62375ms + Aug 24 12:51:25.553: INFO: The phase of Pod test-dns-nameservers is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:51:27.561: INFO: Pod "test-dns-nameservers": Phase="Running", Reason="", readiness=true. Elapsed: 2.014879875s + Aug 24 12:51:27.561: INFO: The phase of Pod test-dns-nameservers is Running (Ready = true) + Aug 24 12:51:27.562: INFO: Pod "test-dns-nameservers" satisfied condition "running and ready" + STEP: Verifying customized DNS suffix list is configured on pod... 08/24/23 12:51:27.562 + Aug 24 12:51:27.562: INFO: ExecWithOptions {Command:[/agnhost dns-suffix] Namespace:dns-1113 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:51:27.562: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:51:27.564: INFO: ExecWithOptions: Clientset creation + Aug 24 12:51:27.564: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/dns-1113/pods/test-dns-nameservers/exec?command=%2Fagnhost&command=dns-suffix&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) + STEP: Verifying customized DNS server is configured on pod... 
08/24/23 12:51:27.715 + Aug 24 12:51:27.716: INFO: ExecWithOptions {Command:[/agnhost dns-server-list] Namespace:dns-1113 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:51:27.716: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:51:27.717: INFO: ExecWithOptions: Clientset creation + Aug 24 12:51:27.717: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/dns-1113/pods/test-dns-nameservers/exec?command=%2Fagnhost&command=dns-server-list&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) + Aug 24 12:51:27.875: INFO: Deleting pod test-dns-nameservers... [AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:08.068: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:51:27.901: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 - STEP: Destroying namespace "dns-3640" for this suite. 07/29/23 16:50:08.084 + STEP: Destroying namespace "dns-1113" for this suite. 08/24/23 12:51:27.911 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +S ------------------------------ -[sig-storage] Downward API volume - should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:261 -[BeforeEach] [sig-storage] Downward API volume +[sig-node] Downward API + should provide pod UID as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:267 +[BeforeEach] [sig-node] Downward API set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:08.112 -Jul 29 16:50:08.112: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:50:08.114 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:08.15 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:08.155 -[BeforeEach] [sig-storage] Downward API volume +STEP: Creating a kubernetes client 08/24/23 12:51:27.928 +Aug 24 12:51:27.929: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:51:27.932 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:27.965 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:27.969 +[BeforeEach] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 -[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:261 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:50:08.162 -Jul 29 16:50:08.192: INFO: Waiting up to 5m0s for pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5" in namespace "downward-api-97" to be "Succeeded or Failed" -Jul 29 16:50:08.201: INFO: Pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.298966ms -Jul 29 16:50:10.211: INFO: Pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018752763s -Jul 29 16:50:12.211: INFO: Pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018759828s -STEP: Saw pod success 07/29/23 16:50:12.212 -Jul 29 16:50:12.212: INFO: Pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5" satisfied condition "Succeeded or Failed" -Jul 29 16:50:12.222: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5 container client-container: -STEP: delete the pod 07/29/23 16:50:12.24 -Jul 29 16:50:12.268: INFO: Waiting for pod downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5 to disappear -Jul 29 16:50:12.276: INFO: Pod downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5 no longer exists -[AfterEach] [sig-storage] Downward API volume +[It] should provide pod UID as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:267 +STEP: Creating a pod to test downward api env vars 08/24/23 12:51:27.974 +Aug 24 12:51:27.994: INFO: Waiting up to 5m0s for pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3" in namespace "downward-api-4095" to be "Succeeded or Failed" +Aug 24 12:51:28.007: INFO: Pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3": Phase="Pending", Reason="", readiness=false. Elapsed: 13.223254ms +Aug 24 12:51:30.016: INFO: Pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022030081s +Aug 24 12:51:32.018: INFO: Pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024166289s +STEP: Saw pod success 08/24/23 12:51:32.018 +Aug 24 12:51:32.019: INFO: Pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3" satisfied condition "Succeeded or Failed" +Aug 24 12:51:32.026: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3 container dapi-container: +STEP: delete the pod 08/24/23 12:51:32.041 +Aug 24 12:51:32.069: INFO: Waiting for pod downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3 to disappear +Aug 24 12:51:32.076: INFO: Pod downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3 no longer exists +[AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:12.277: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume +Aug 24 12:51:32.078: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-97" for this suite. 07/29/23 16:50:12.289 +STEP: Destroying namespace "downward-api-4095" for this suite. 
08/24/23 12:51:32.09 ------------------------------ -• [4.199 seconds] -[sig-storage] Downward API volume -test/e2e/common/storage/framework.go:23 - should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:261 +• [4.175 seconds] +[sig-node] Downward API +test/e2e/common/node/framework.go:23 + should provide pod UID as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:267 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume + [BeforeEach] [sig-node] Downward API set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:08.112 - Jul 29 16:50:08.112: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:50:08.114 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:08.15 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:08.155 - [BeforeEach] [sig-storage] Downward API volume + STEP: Creating a kubernetes client 08/24/23 12:51:27.928 + Aug 24 12:51:27.929: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 12:51:27.932 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:27.965 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:27.969 + [BeforeEach] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 - [It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:261 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:50:08.162 - Jul 29 16:50:08.192: INFO: Waiting up to 5m0s for pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5" in namespace "downward-api-97" to be "Succeeded or Failed" - Jul 29 16:50:08.201: INFO: Pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5": Phase="Pending", Reason="", readiness=false. Elapsed: 8.298966ms - Jul 29 16:50:10.211: INFO: Pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018752763s - Jul 29 16:50:12.211: INFO: Pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018759828s - STEP: Saw pod success 07/29/23 16:50:12.212 - Jul 29 16:50:12.212: INFO: Pod "downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5" satisfied condition "Succeeded or Failed" - Jul 29 16:50:12.222: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5 container client-container: - STEP: delete the pod 07/29/23 16:50:12.24 - Jul 29 16:50:12.268: INFO: Waiting for pod downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5 to disappear - Jul 29 16:50:12.276: INFO: Pod downwardapi-volume-1cdd2c91-03df-4a43-bc4f-9f5cf0972eb5 no longer exists - [AfterEach] [sig-storage] Downward API volume + [It] should provide pod UID as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:267 + STEP: Creating a pod to test downward api env vars 08/24/23 12:51:27.974 + Aug 24 12:51:27.994: INFO: Waiting up to 5m0s for pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3" in namespace "downward-api-4095" to be "Succeeded or Failed" + Aug 24 12:51:28.007: INFO: Pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3": Phase="Pending", Reason="", readiness=false. Elapsed: 13.223254ms + Aug 24 12:51:30.016: INFO: Pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022030081s + Aug 24 12:51:32.018: INFO: Pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024166289s + STEP: Saw pod success 08/24/23 12:51:32.018 + Aug 24 12:51:32.019: INFO: Pod "downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3" satisfied condition "Succeeded or Failed" + Aug 24 12:51:32.026: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3 container dapi-container: + STEP: delete the pod 08/24/23 12:51:32.041 + Aug 24 12:51:32.069: INFO: Waiting for pod downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3 to disappear + Aug 24 12:51:32.076: INFO: Pod downward-api-e0b37d56-43d9-4f1e-84d9-81e7e5be9bf3 no longer exists + [AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:12.277: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume + Aug 24 12:51:32.078: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-97" for this suite. 07/29/23 16:50:12.289 + STEP: Destroying namespace "downward-api-4095" for this suite. 
08/24/23 12:51:32.09 << End Captured GinkgoWriter Output ------------------------------ -[sig-storage] Secrets - should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:47 -[BeforeEach] [sig-storage] Secrets +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Variable Expansion + should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] + test/e2e/common/node/expansion.go:186 +[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:12.312 -Jul 29 16:50:12.312: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename secrets 07/29/23 16:50:12.316 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:12.355 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:12.376 -[BeforeEach] [sig-storage] Secrets +STEP: Creating a kubernetes client 08/24/23 12:51:32.109 +Aug 24 12:51:32.109: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename var-expansion 08/24/23 12:51:32.112 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:32.138 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:32.144 +[BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:47 -STEP: Creating secret with name secret-test-a21ac9f1-7129-4690-97bd-4cca963f0bb1 07/29/23 16:50:12.385 -STEP: Creating a pod to test consume secrets 07/29/23 16:50:12.403 -Jul 29 16:50:12.432: INFO: Waiting up to 5m0s for pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77" in namespace "secrets-9464" to be "Succeeded or Failed" -Jul 29 16:50:12.483: INFO: Pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77": Phase="Pending", Reason="", readiness=false. Elapsed: 51.266705ms -Jul 29 16:50:14.492: INFO: Pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77": Phase="Pending", Reason="", readiness=false. Elapsed: 2.059835191s -Jul 29 16:50:16.494: INFO: Pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.062282626s -STEP: Saw pod success 07/29/23 16:50:16.495 -Jul 29 16:50:16.495: INFO: Pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77" satisfied condition "Succeeded or Failed" -Jul 29 16:50:16.504: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77 container secret-volume-test: -STEP: delete the pod 07/29/23 16:50:16.529 -Jul 29 16:50:16.564: INFO: Waiting for pod pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77 to disappear -Jul 29 16:50:16.572: INFO: Pod pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77 no longer exists -[AfterEach] [sig-storage] Secrets +[It] should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] + test/e2e/common/node/expansion.go:186 +Aug 24 12:51:32.165: INFO: Waiting up to 2m0s for pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91" in namespace "var-expansion-6197" to be "container 0 failed with reason CreateContainerConfigError" +Aug 24 12:51:32.169: INFO: Pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4.224346ms +Aug 24 12:51:34.175: INFO: Pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91": Phase="Pending", Reason="", readiness=false. Elapsed: 2.010414746s +Aug 24 12:51:34.175: INFO: Pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91" satisfied condition "container 0 failed with reason CreateContainerConfigError" +Aug 24 12:51:34.175: INFO: Deleting pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91" in namespace "var-expansion-6197" +Aug 24 12:51:34.190: INFO: Wait up to 5m0s for pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91" to be fully deleted +[AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:16.572: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Secrets +Aug 24 12:51:36.208: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-9464" for this suite. 07/29/23 16:50:16.581 +STEP: Destroying namespace "var-expansion-6197" for this suite. 08/24/23 12:51:36.217 ------------------------------ -• [4.286 seconds] -[sig-storage] Secrets -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:47 +• [4.122 seconds] +[sig-node] Variable Expansion +test/e2e/common/node/framework.go:23 + should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] + test/e2e/common/node/expansion.go:186 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Secrets + [BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:12.312 - Jul 29 16:50:12.312: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 16:50:12.316 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:12.355 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:12.376 - [BeforeEach] [sig-storage] Secrets + STEP: Creating a kubernetes client 08/24/23 12:51:32.109 + Aug 24 12:51:32.109: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename var-expansion 08/24/23 12:51:32.112 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:32.138 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:32.144 + [BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:47 - STEP: Creating secret with name secret-test-a21ac9f1-7129-4690-97bd-4cca963f0bb1 07/29/23 16:50:12.385 - STEP: Creating a pod to test consume secrets 07/29/23 16:50:12.403 - Jul 29 16:50:12.432: INFO: Waiting up to 5m0s for pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77" in namespace "secrets-9464" to be "Succeeded or Failed" - Jul 29 16:50:12.483: INFO: Pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77": Phase="Pending", Reason="", readiness=false. 
Elapsed: 51.266705ms - Jul 29 16:50:14.492: INFO: Pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77": Phase="Pending", Reason="", readiness=false. Elapsed: 2.059835191s - Jul 29 16:50:16.494: INFO: Pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.062282626s - STEP: Saw pod success 07/29/23 16:50:16.495 - Jul 29 16:50:16.495: INFO: Pod "pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77" satisfied condition "Succeeded or Failed" - Jul 29 16:50:16.504: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77 container secret-volume-test: - STEP: delete the pod 07/29/23 16:50:16.529 - Jul 29 16:50:16.564: INFO: Waiting for pod pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77 to disappear - Jul 29 16:50:16.572: INFO: Pod pod-secrets-b38262ae-13b5-4906-9984-39fe0c81cf77 no longer exists - [AfterEach] [sig-storage] Secrets + [It] should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] + test/e2e/common/node/expansion.go:186 + Aug 24 12:51:32.165: INFO: Waiting up to 2m0s for pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91" in namespace "var-expansion-6197" to be "container 0 failed with reason CreateContainerConfigError" + Aug 24 12:51:32.169: INFO: Pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91": Phase="Pending", Reason="", readiness=false. Elapsed: 4.224346ms + Aug 24 12:51:34.175: INFO: Pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91": Phase="Pending", Reason="", readiness=false. Elapsed: 2.010414746s + Aug 24 12:51:34.175: INFO: Pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91" satisfied condition "container 0 failed with reason CreateContainerConfigError" + Aug 24 12:51:34.175: INFO: Deleting pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91" in namespace "var-expansion-6197" + Aug 24 12:51:34.190: INFO: Wait up to 5m0s for pod "var-expansion-3246b1d7-cd7f-4c76-95da-16c8dd223c91" to be fully deleted + [AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:16.572: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Secrets + Aug 24 12:51:36.208: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-9464" for this suite. 07/29/23 16:50:16.581 + STEP: Destroying namespace "var-expansion-6197" for this suite. 
08/24/23 12:51:36.217 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSS +S ------------------------------ -[sig-network] Networking Granular Checks: Pods - should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:122 -[BeforeEach] [sig-network] Networking +[sig-network] Services + should find a service from listing all namespaces [Conformance] + test/e2e/network/service.go:3219 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:16.603 -Jul 29 16:50:16.604: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pod-network-test 07/29/23 16:50:16.608 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:16.641 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:16.663 -[BeforeEach] [sig-network] Networking +STEP: Creating a kubernetes client 08/24/23 12:51:36.233 +Aug 24 12:51:36.233: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 12:51:36.236 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:36.26 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:36.267 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:122 -STEP: Performing setup for networking test in namespace pod-network-test-5366 07/29/23 16:50:16.673 -STEP: creating a selector 07/29/23 16:50:16.673 -STEP: Creating the service pods in kubernetes 07/29/23 16:50:16.674 -Jul 29 16:50:16.674: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable -Jul 29 16:50:16.748: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-5366" to be "running and ready" -Jul 29 16:50:16.794: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 45.817628ms -Jul 29 16:50:16.794: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:50:18.826: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.07799202s -Jul 29 16:50:18.826: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:50:20.804: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.056322147s -Jul 29 16:50:20.804: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:50:22.801: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.053302988s -Jul 29 16:50:22.801: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:50:24.804: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.055849225s -Jul 29 16:50:24.804: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:50:26.808: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.060059479s -Jul 29 16:50:26.808: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:50:28.803: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. 
Elapsed: 12.05519089s -Jul 29 16:50:28.803: INFO: The phase of Pod netserver-0 is Running (Ready = true) -Jul 29 16:50:28.803: INFO: Pod "netserver-0" satisfied condition "running and ready" -Jul 29 16:50:28.814: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-5366" to be "running and ready" -Jul 29 16:50:28.822: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 7.580246ms -Jul 29 16:50:28.822: INFO: The phase of Pod netserver-1 is Running (Ready = true) -Jul 29 16:50:28.822: INFO: Pod "netserver-1" satisfied condition "running and ready" -Jul 29 16:50:28.830: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-5366" to be "running and ready" -Jul 29 16:50:28.839: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 8.256555ms -Jul 29 16:50:28.839: INFO: The phase of Pod netserver-2 is Running (Ready = true) -Jul 29 16:50:28.839: INFO: Pod "netserver-2" satisfied condition "running and ready" -STEP: Creating test pods 07/29/23 16:50:28.846 -Jul 29 16:50:28.872: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-5366" to be "running" -Jul 29 16:50:28.891: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 18.665465ms -Jul 29 16:50:30.925: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.05272634s -Jul 29 16:50:30.926: INFO: Pod "test-container-pod" satisfied condition "running" -Jul 29 16:50:30.939: INFO: Waiting up to 5m0s for pod "host-test-container-pod" in namespace "pod-network-test-5366" to be "running" -Jul 29 16:50:30.945: INFO: Pod "host-test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 5.799115ms -Jul 29 16:50:30.945: INFO: Pod "host-test-container-pod" satisfied condition "running" -Jul 29 16:50:30.950: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 -Jul 29 16:50:30.951: INFO: Going to poll 10.233.64.28 on port 8081 at least 0 times, with a maximum of 39 tries before failing -Jul 29 16:50:30.956: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.64.28 8081 | grep -v '^\s*$'] Namespace:pod-network-test-5366 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:50:30.956: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:50:30.957: INFO: ExecWithOptions: Clientset creation -Jul 29 16:50:30.957: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-5366/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.64.28+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) -Jul 29 16:50:32.099: INFO: Found all 1 expected endpoints: [netserver-0] -Jul 29 16:50:32.099: INFO: Going to poll 10.233.65.125 on port 8081 at least 0 times, with a maximum of 39 tries before failing -Jul 29 16:50:32.106: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.65.125 8081 | grep -v '^\s*$'] Namespace:pod-network-test-5366 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:50:32.106: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:50:32.109: INFO: ExecWithOptions: Clientset creation -Jul 29 
16:50:32.109: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-5366/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.65.125+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) -Jul 29 16:50:33.210: INFO: Found all 1 expected endpoints: [netserver-1] -Jul 29 16:50:33.210: INFO: Going to poll 10.233.66.117 on port 8081 at least 0 times, with a maximum of 39 tries before failing -Jul 29 16:50:33.219: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.66.117 8081 | grep -v '^\s*$'] Namespace:pod-network-test-5366 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:50:33.219: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:50:33.221: INFO: ExecWithOptions: Clientset creation -Jul 29 16:50:33.221: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-5366/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.66.117+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) -Jul 29 16:50:34.321: INFO: Found all 1 expected endpoints: [netserver-2] -[AfterEach] [sig-network] Networking +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should find a service from listing all namespaces [Conformance] + test/e2e/network/service.go:3219 +STEP: fetching services 08/24/23 12:51:36.273 +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:34.321: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Networking +Aug 24 12:51:36.279: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Networking +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Networking +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "pod-network-test-5366" for this suite. 07/29/23 16:50:34.332 +STEP: Destroying namespace "services-5794" for this suite. 
08/24/23 12:51:36.289 ------------------------------ -• [SLOW TEST] [17.742 seconds] -[sig-network] Networking -test/e2e/common/network/framework.go:23 - Granular Checks: Pods - test/e2e/common/network/networking.go:32 - should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:122 +• [0.069 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should find a service from listing all namespaces [Conformance] + test/e2e/network/service.go:3219 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Networking + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:16.603 - Jul 29 16:50:16.604: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pod-network-test 07/29/23 16:50:16.608 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:16.641 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:16.663 - [BeforeEach] [sig-network] Networking + STEP: Creating a kubernetes client 08/24/23 12:51:36.233 + Aug 24 12:51:36.233: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 12:51:36.236 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:36.26 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:36.267 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:122 - STEP: Performing setup for networking test in namespace pod-network-test-5366 07/29/23 16:50:16.673 - STEP: creating a selector 07/29/23 16:50:16.673 - STEP: Creating the service pods in kubernetes 07/29/23 16:50:16.674 - Jul 29 16:50:16.674: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable - Jul 29 16:50:16.748: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-5366" to be "running and ready" - Jul 29 16:50:16.794: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 45.817628ms - Jul 29 16:50:16.794: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:50:18.826: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.07799202s - Jul 29 16:50:18.826: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:50:20.804: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.056322147s - Jul 29 16:50:20.804: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:50:22.801: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.053302988s - Jul 29 16:50:22.801: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:50:24.804: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.055849225s - Jul 29 16:50:24.804: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:50:26.808: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.060059479s - Jul 29 16:50:26.808: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:50:28.803: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. 
Elapsed: 12.05519089s - Jul 29 16:50:28.803: INFO: The phase of Pod netserver-0 is Running (Ready = true) - Jul 29 16:50:28.803: INFO: Pod "netserver-0" satisfied condition "running and ready" - Jul 29 16:50:28.814: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-5366" to be "running and ready" - Jul 29 16:50:28.822: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 7.580246ms - Jul 29 16:50:28.822: INFO: The phase of Pod netserver-1 is Running (Ready = true) - Jul 29 16:50:28.822: INFO: Pod "netserver-1" satisfied condition "running and ready" - Jul 29 16:50:28.830: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-5366" to be "running and ready" - Jul 29 16:50:28.839: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 8.256555ms - Jul 29 16:50:28.839: INFO: The phase of Pod netserver-2 is Running (Ready = true) - Jul 29 16:50:28.839: INFO: Pod "netserver-2" satisfied condition "running and ready" - STEP: Creating test pods 07/29/23 16:50:28.846 - Jul 29 16:50:28.872: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-5366" to be "running" - Jul 29 16:50:28.891: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 18.665465ms - Jul 29 16:50:30.925: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.05272634s - Jul 29 16:50:30.926: INFO: Pod "test-container-pod" satisfied condition "running" - Jul 29 16:50:30.939: INFO: Waiting up to 5m0s for pod "host-test-container-pod" in namespace "pod-network-test-5366" to be "running" - Jul 29 16:50:30.945: INFO: Pod "host-test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 5.799115ms - Jul 29 16:50:30.945: INFO: Pod "host-test-container-pod" satisfied condition "running" - Jul 29 16:50:30.950: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 - Jul 29 16:50:30.951: INFO: Going to poll 10.233.64.28 on port 8081 at least 0 times, with a maximum of 39 tries before failing - Jul 29 16:50:30.956: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.64.28 8081 | grep -v '^\s*$'] Namespace:pod-network-test-5366 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:50:30.956: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:50:30.957: INFO: ExecWithOptions: Clientset creation - Jul 29 16:50:30.957: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-5366/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.64.28+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) - Jul 29 16:50:32.099: INFO: Found all 1 expected endpoints: [netserver-0] - Jul 29 16:50:32.099: INFO: Going to poll 10.233.65.125 on port 8081 at least 0 times, with a maximum of 39 tries before failing - Jul 29 16:50:32.106: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.65.125 8081 | grep -v '^\s*$'] Namespace:pod-network-test-5366 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:50:32.106: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:50:32.109: INFO: ExecWithOptions: 
Clientset creation - Jul 29 16:50:32.109: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-5366/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.65.125+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) - Jul 29 16:50:33.210: INFO: Found all 1 expected endpoints: [netserver-1] - Jul 29 16:50:33.210: INFO: Going to poll 10.233.66.117 on port 8081 at least 0 times, with a maximum of 39 tries before failing - Jul 29 16:50:33.219: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 10.233.66.117 8081 | grep -v '^\s*$'] Namespace:pod-network-test-5366 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:50:33.219: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:50:33.221: INFO: ExecWithOptions: Clientset creation - Jul 29 16:50:33.221: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-5366/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostName+%7C+nc+-w+1+-u+10.233.66.117+8081+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) - Jul 29 16:50:34.321: INFO: Found all 1 expected endpoints: [netserver-2] - [AfterEach] [sig-network] Networking + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should find a service from listing all namespaces [Conformance] + test/e2e/network/service.go:3219 + STEP: fetching services 08/24/23 12:51:36.273 + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:34.321: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Networking + Aug 24 12:51:36.279: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Networking + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Networking + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "pod-network-test-5366" for this suite. 07/29/23 16:50:34.332 + STEP: Destroying namespace "services-5794" for this suite. 
08/24/23 12:51:36.289 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Sysctls [LinuxOnly] [NodeConformance] - should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] - test/e2e/common/node/sysctl.go:123 -[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] - test/e2e/common/node/sysctl.go:37 -[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +[sig-network] EndpointSlice + should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] + test/e2e/network/endpointslice.go:205 +[BeforeEach] [sig-network] EndpointSlice set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:34.349 -Jul 29 16:50:34.350: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sysctl 07/29/23 16:50:34.351 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:34.383 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:34.39 -[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +STEP: Creating a kubernetes client 08/24/23 12:51:36.311 +Aug 24 12:51:36.311: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename endpointslice 08/24/23 12:51:36.313 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:36.338 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:36.343 +[BeforeEach] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] - test/e2e/common/node/sysctl.go:67 -[It] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] - test/e2e/common/node/sysctl.go:123 -STEP: Creating a pod with one valid and two invalid sysctls 07/29/23 16:50:34.394 -[AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +[BeforeEach] [sig-network] EndpointSlice + test/e2e/network/endpointslice.go:52 +[It] should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] + test/e2e/network/endpointslice.go:205 +STEP: referencing a single matching pod 08/24/23 12:51:41.524 +STEP: referencing matching pods with named port 08/24/23 12:51:46.544 +STEP: creating empty Endpoints and EndpointSlices for no matching Pods 08/24/23 12:51:51.566 +STEP: recreating EndpointSlices after they've been deleted 08/24/23 12:51:56.583 +Aug 24 12:51:56.622: INFO: EndpointSlice for Service endpointslice-647/example-named-port not found +[AfterEach] [sig-network] EndpointSlice test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:34.406: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +Aug 24 12:52:06.649: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +[DeferCleanup (Each)] [sig-network] EndpointSlice dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] +[DeferCleanup (Each)] [sig-network] EndpointSlice tear down framework | framework.go:193 -STEP: Destroying namespace "sysctl-7518" for this suite. 07/29/23 16:50:34.416 +STEP: Destroying namespace "endpointslice-647" for this suite. 
08/24/23 12:52:06.657 ------------------------------ -• [0.081 seconds] -[sig-node] Sysctls [LinuxOnly] [NodeConformance] -test/e2e/common/node/framework.go:23 - should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] - test/e2e/common/node/sysctl.go:123 +• [SLOW TEST] [30.357 seconds] +[sig-network] EndpointSlice +test/e2e/network/common/framework.go:23 + should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] + test/e2e/network/endpointslice.go:205 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] - test/e2e/common/node/sysctl.go:37 - [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + [BeforeEach] [sig-network] EndpointSlice set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:34.349 - Jul 29 16:50:34.350: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sysctl 07/29/23 16:50:34.351 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:34.383 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:34.39 - [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + STEP: Creating a kubernetes client 08/24/23 12:51:36.311 + Aug 24 12:51:36.311: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename endpointslice 08/24/23 12:51:36.313 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:51:36.338 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:51:36.343 + [BeforeEach] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] - test/e2e/common/node/sysctl.go:67 - [It] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] - test/e2e/common/node/sysctl.go:123 - STEP: Creating a pod with one valid and two invalid sysctls 07/29/23 16:50:34.394 - [AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + [BeforeEach] [sig-network] EndpointSlice + test/e2e/network/endpointslice.go:52 + [It] should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] + test/e2e/network/endpointslice.go:205 + STEP: referencing a single matching pod 08/24/23 12:51:41.524 + STEP: referencing matching pods with named port 08/24/23 12:51:46.544 + STEP: creating empty Endpoints and EndpointSlices for no matching Pods 08/24/23 12:51:51.566 + STEP: recreating EndpointSlices after they've been deleted 08/24/23 12:51:56.583 + Aug 24 12:51:56.622: INFO: EndpointSlice for Service endpointslice-647/example-named-port not found + [AfterEach] [sig-network] EndpointSlice test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:34.406: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + Aug 24 12:52:06.649: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] EndpointSlice test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + [DeferCleanup (Each)] [sig-network] EndpointSlice dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + [DeferCleanup (Each)] [sig-network] EndpointSlice tear down framework | framework.go:193 - STEP: Destroying namespace "sysctl-7518" for this suite. 
07/29/23 16:50:34.416 + STEP: Destroying namespace "endpointslice-647" for this suite. 08/24/23 12:52:06.657 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should be able to update and delete ResourceQuota. [Conformance] - test/e2e/apimachinery/resource_quota.go:884 -[BeforeEach] [sig-api-machinery] ResourceQuota +[sig-storage] EmptyDir volumes + should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:147 +[BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:34.439 -Jul 29 16:50:34.439: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 16:50:34.44 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:34.499 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:34.517 -[BeforeEach] [sig-api-machinery] ResourceQuota +STEP: Creating a kubernetes client 08/24/23 12:52:06.679 +Aug 24 12:52:06.679: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 12:52:06.681 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:06.707 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:06.712 +[BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[It] should be able to update and delete ResourceQuota. [Conformance] - test/e2e/apimachinery/resource_quota.go:884 -STEP: Creating a ResourceQuota 07/29/23 16:50:34.538 -STEP: Getting a ResourceQuota 07/29/23 16:50:34.547 -STEP: Updating a ResourceQuota 07/29/23 16:50:34.557 -STEP: Verifying a ResourceQuota was modified 07/29/23 16:50:34.567 -STEP: Deleting a ResourceQuota 07/29/23 16:50:34.572 -STEP: Verifying the deleted ResourceQuota 07/29/23 16:50:34.585 -[AfterEach] [sig-api-machinery] ResourceQuota +[It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:147 +STEP: Creating a pod to test emptydir 0777 on tmpfs 08/24/23 12:52:06.717 +Aug 24 12:52:06.734: INFO: Waiting up to 5m0s for pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176" in namespace "emptydir-274" to be "Succeeded or Failed" +Aug 24 12:52:06.745: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176": Phase="Pending", Reason="", readiness=false. Elapsed: 10.797918ms +Aug 24 12:52:08.753: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176": Phase="Running", Reason="", readiness=false. Elapsed: 2.019449407s +Aug 24 12:52:10.754: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176": Phase="Running", Reason="", readiness=false. Elapsed: 4.020020792s +Aug 24 12:52:12.758: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.024407564s +STEP: Saw pod success 08/24/23 12:52:12.758 +Aug 24 12:52:12.759: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176" satisfied condition "Succeeded or Failed" +Aug 24 12:52:12.770: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-6fb0ea8b-ac36-4936-932d-47bdebabc176 container test-container: +STEP: delete the pod 08/24/23 12:52:12.79 +Aug 24 12:52:12.809: INFO: Waiting for pod pod-6fb0ea8b-ac36-4936-932d-47bdebabc176 to disappear +Aug 24 12:52:12.818: INFO: Pod pod-6fb0ea8b-ac36-4936-932d-47bdebabc176 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:34.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 12:52:12.818: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-3483" for this suite. 07/29/23 16:50:34.598 +STEP: Destroying namespace "emptydir-274" for this suite. 08/24/23 12:52:12.83 ------------------------------ -• [0.170 seconds] -[sig-api-machinery] ResourceQuota -test/e2e/apimachinery/framework.go:23 - should be able to update and delete ResourceQuota. [Conformance] - test/e2e/apimachinery/resource_quota.go:884 +• [SLOW TEST] [6.163 seconds] +[sig-storage] EmptyDir volumes +test/e2e/common/storage/framework.go:23 + should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:147 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:34.439 - Jul 29 16:50:34.439: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 16:50:34.44 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:34.499 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:34.517 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 12:52:06.679 + Aug 24 12:52:06.679: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 12:52:06.681 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:06.707 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:06.712 + [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [It] should be able to update and delete ResourceQuota. 
[Conformance] - test/e2e/apimachinery/resource_quota.go:884 - STEP: Creating a ResourceQuota 07/29/23 16:50:34.538 - STEP: Getting a ResourceQuota 07/29/23 16:50:34.547 - STEP: Updating a ResourceQuota 07/29/23 16:50:34.557 - STEP: Verifying a ResourceQuota was modified 07/29/23 16:50:34.567 - STEP: Deleting a ResourceQuota 07/29/23 16:50:34.572 - STEP: Verifying the deleted ResourceQuota 07/29/23 16:50:34.585 - [AfterEach] [sig-api-machinery] ResourceQuota + [It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:147 + STEP: Creating a pod to test emptydir 0777 on tmpfs 08/24/23 12:52:06.717 + Aug 24 12:52:06.734: INFO: Waiting up to 5m0s for pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176" in namespace "emptydir-274" to be "Succeeded or Failed" + Aug 24 12:52:06.745: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176": Phase="Pending", Reason="", readiness=false. Elapsed: 10.797918ms + Aug 24 12:52:08.753: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176": Phase="Running", Reason="", readiness=false. Elapsed: 2.019449407s + Aug 24 12:52:10.754: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176": Phase="Running", Reason="", readiness=false. Elapsed: 4.020020792s + Aug 24 12:52:12.758: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.024407564s + STEP: Saw pod success 08/24/23 12:52:12.758 + Aug 24 12:52:12.759: INFO: Pod "pod-6fb0ea8b-ac36-4936-932d-47bdebabc176" satisfied condition "Succeeded or Failed" + Aug 24 12:52:12.770: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-6fb0ea8b-ac36-4936-932d-47bdebabc176 container test-container: + STEP: delete the pod 08/24/23 12:52:12.79 + Aug 24 12:52:12.809: INFO: Waiting for pod pod-6fb0ea8b-ac36-4936-932d-47bdebabc176 to disappear + Aug 24 12:52:12.818: INFO: Pod pod-6fb0ea8b-ac36-4936-932d-47bdebabc176 no longer exists + [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:34.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 12:52:12.818: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-3483" for this suite. 07/29/23 16:50:34.598 + STEP: Destroying namespace "emptydir-274" for this suite. 
08/24/23 12:52:12.83 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:57 -[BeforeEach] [sig-storage] Projected configMap +[sig-node] Pods + should be updated [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:344 +[BeforeEach] [sig-node] Pods set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:34.612 -Jul 29 16:50:34.612: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:50:34.614 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:34.637 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:34.642 -[BeforeEach] [sig-storage] Projected configMap +STEP: Creating a kubernetes client 08/24/23 12:52:12.848 +Aug 24 12:52:12.848: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 12:52:12.851 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:12.877 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:12.882 +[BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:57 -STEP: Creating configMap with name projected-configmap-test-volume-d8496ed6-0fbb-485b-bc3f-cf98a1e58dc4 07/29/23 16:50:34.646 -STEP: Creating a pod to test consume configMaps 07/29/23 16:50:34.655 -Jul 29 16:50:34.672: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5" in namespace "projected-6812" to be "Succeeded or Failed" -Jul 29 16:50:34.687: INFO: Pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5": Phase="Pending", Reason="", readiness=false. Elapsed: 14.177606ms -Jul 29 16:50:36.695: INFO: Pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022799668s -Jul 29 16:50:38.696: INFO: Pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.023518649s -STEP: Saw pod success 07/29/23 16:50:38.696 -Jul 29 16:50:38.696: INFO: Pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5" satisfied condition "Succeeded or Failed" -Jul 29 16:50:38.703: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5 container agnhost-container: -STEP: delete the pod 07/29/23 16:50:38.717 -Jul 29 16:50:38.742: INFO: Waiting for pod pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5 to disappear -Jul 29 16:50:38.752: INFO: Pod pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5 no longer exists -[AfterEach] [sig-storage] Projected configMap +[BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 +[It] should be updated [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:344 +STEP: creating the pod 08/24/23 12:52:12.888 +STEP: submitting the pod to kubernetes 08/24/23 12:52:12.888 +Aug 24 12:52:12.903: INFO: Waiting up to 5m0s for pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" in namespace "pods-2510" to be "running and ready" +Aug 24 12:52:12.909: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b": Phase="Pending", Reason="", readiness=false. Elapsed: 5.954585ms +Aug 24 12:52:12.909: INFO: The phase of Pod pod-update-fa459655-e0c0-4766-befe-20c66240b18b is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:52:14.918: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b": Phase="Running", Reason="", readiness=true. Elapsed: 2.014195722s +Aug 24 12:52:14.918: INFO: The phase of Pod pod-update-fa459655-e0c0-4766-befe-20c66240b18b is Running (Ready = true) +Aug 24 12:52:14.918: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" satisfied condition "running and ready" +STEP: verifying the pod is in kubernetes 08/24/23 12:52:14.923 +STEP: updating the pod 08/24/23 12:52:14.927 +Aug 24 12:52:15.453: INFO: Successfully updated pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" +Aug 24 12:52:15.453: INFO: Waiting up to 5m0s for pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" in namespace "pods-2510" to be "running" +Aug 24 12:52:15.460: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b": Phase="Running", Reason="", readiness=true. Elapsed: 6.85731ms +Aug 24 12:52:15.461: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" satisfied condition "running" +STEP: verifying the updated pod is in kubernetes 08/24/23 12:52:15.461 +Aug 24 12:52:15.480: INFO: Pod update OK +[AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:38.752: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected configMap +Aug 24 12:52:15.481: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 -STEP: Destroying namespace "projected-6812" for this suite. 07/29/23 16:50:38.765 +STEP: Destroying namespace "pods-2510" for this suite. 
08/24/23 12:52:15.489 ------------------------------ -• [4.172 seconds] -[sig-storage] Projected configMap -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:57 +• [2.653 seconds] +[sig-node] Pods +test/e2e/common/node/framework.go:23 + should be updated [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:344 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected configMap + [BeforeEach] [sig-node] Pods set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:34.612 - Jul 29 16:50:34.612: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:50:34.614 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:34.637 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:34.642 - [BeforeEach] [sig-storage] Projected configMap + STEP: Creating a kubernetes client 08/24/23 12:52:12.848 + Aug 24 12:52:12.848: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 12:52:12.851 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:12.877 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:12.882 + [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:57 - STEP: Creating configMap with name projected-configmap-test-volume-d8496ed6-0fbb-485b-bc3f-cf98a1e58dc4 07/29/23 16:50:34.646 - STEP: Creating a pod to test consume configMaps 07/29/23 16:50:34.655 - Jul 29 16:50:34.672: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5" in namespace "projected-6812" to be "Succeeded or Failed" - Jul 29 16:50:34.687: INFO: Pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5": Phase="Pending", Reason="", readiness=false. Elapsed: 14.177606ms - Jul 29 16:50:36.695: INFO: Pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022799668s - Jul 29 16:50:38.696: INFO: Pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.023518649s - STEP: Saw pod success 07/29/23 16:50:38.696 - Jul 29 16:50:38.696: INFO: Pod "pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5" satisfied condition "Succeeded or Failed" - Jul 29 16:50:38.703: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5 container agnhost-container: - STEP: delete the pod 07/29/23 16:50:38.717 - Jul 29 16:50:38.742: INFO: Waiting for pod pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5 to disappear - Jul 29 16:50:38.752: INFO: Pod pod-projected-configmaps-9d55aec7-08ec-4a76-be32-c1d9762c5fc5 no longer exists - [AfterEach] [sig-storage] Projected configMap + [BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 + [It] should be updated [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:344 + STEP: creating the pod 08/24/23 12:52:12.888 + STEP: submitting the pod to kubernetes 08/24/23 12:52:12.888 + Aug 24 12:52:12.903: INFO: Waiting up to 5m0s for pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" in namespace "pods-2510" to be "running and ready" + Aug 24 12:52:12.909: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b": Phase="Pending", Reason="", readiness=false. Elapsed: 5.954585ms + Aug 24 12:52:12.909: INFO: The phase of Pod pod-update-fa459655-e0c0-4766-befe-20c66240b18b is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:52:14.918: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b": Phase="Running", Reason="", readiness=true. Elapsed: 2.014195722s + Aug 24 12:52:14.918: INFO: The phase of Pod pod-update-fa459655-e0c0-4766-befe-20c66240b18b is Running (Ready = true) + Aug 24 12:52:14.918: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" satisfied condition "running and ready" + STEP: verifying the pod is in kubernetes 08/24/23 12:52:14.923 + STEP: updating the pod 08/24/23 12:52:14.927 + Aug 24 12:52:15.453: INFO: Successfully updated pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" + Aug 24 12:52:15.453: INFO: Waiting up to 5m0s for pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" in namespace "pods-2510" to be "running" + Aug 24 12:52:15.460: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b": Phase="Running", Reason="", readiness=true. Elapsed: 6.85731ms + Aug 24 12:52:15.461: INFO: Pod "pod-update-fa459655-e0c0-4766-befe-20c66240b18b" satisfied condition "running" + STEP: verifying the updated pod is in kubernetes 08/24/23 12:52:15.461 + Aug 24 12:52:15.480: INFO: Pod update OK + [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:38.752: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected configMap + Aug 24 12:52:15.481: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 - STEP: Destroying namespace "projected-6812" for this suite. 07/29/23 16:50:38.765 + STEP: Destroying namespace "pods-2510" for this suite. 
08/24/23 12:52:15.489 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] ReplicationController - should adopt matching pods on creation [Conformance] - test/e2e/apps/rc.go:92 -[BeforeEach] [sig-apps] ReplicationController +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate custom resource [Conformance] + test/e2e/apimachinery/webhook.go:291 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:38.788 -Jul 29 16:50:38.788: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replication-controller 07/29/23 16:50:38.791 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:38.824 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:38.826 -[BeforeEach] [sig-apps] ReplicationController +STEP: Creating a kubernetes client 08/24/23 12:52:15.509 +Aug 24 12:52:15.509: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 12:52:15.511 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:15.536 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:15.542 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 -[It] should adopt matching pods on creation [Conformance] - test/e2e/apps/rc.go:92 -STEP: Given a Pod with a 'name' label pod-adoption is created 07/29/23 16:50:38.833 -Jul 29 16:50:38.852: INFO: Waiting up to 5m0s for pod "pod-adoption" in namespace "replication-controller-6698" to be "running and ready" -Jul 29 16:50:38.863: INFO: Pod "pod-adoption": Phase="Pending", Reason="", readiness=false. Elapsed: 10.536091ms -Jul 29 16:50:38.863: INFO: The phase of Pod pod-adoption is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:50:40.873: INFO: Pod "pod-adoption": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.020156097s -Jul 29 16:50:40.873: INFO: The phase of Pod pod-adoption is Running (Ready = true) -Jul 29 16:50:40.873: INFO: Pod "pod-adoption" satisfied condition "running and ready" -STEP: When a replication controller with a matching selector is created 07/29/23 16:50:40.88 -STEP: Then the orphan pod is adopted 07/29/23 16:50:40.892 -[AfterEach] [sig-apps] ReplicationController +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 12:52:15.576 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:52:16.713 +STEP: Deploying the webhook pod 08/24/23 12:52:16.724 +STEP: Wait for the deployment to be ready 08/24/23 12:52:16.745 +Aug 24 12:52:16.757: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +STEP: Deploying the webhook service 08/24/23 12:52:18.775 +STEP: Verifying the service has paired with the endpoint 08/24/23 12:52:18.814 +Aug 24 12:52:19.816: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate custom resource [Conformance] + test/e2e/apimachinery/webhook.go:291 +Aug 24 12:52:19.824: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Registering the mutating webhook for custom resource e2e-test-webhook-7273-crds.webhook.example.com via the AdmissionRegistration API 08/24/23 12:52:20.351 +STEP: Creating a custom resource that should be mutated by the webhook 08/24/23 12:52:20.405 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:41.912: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] ReplicationController +Aug 24 12:52:23.139: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] ReplicationController +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] ReplicationController +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "replication-controller-6698" for this suite. 07/29/23 16:50:41.922 +STEP: Destroying namespace "webhook-4489" for this suite. 08/24/23 12:52:23.268 +STEP: Destroying namespace "webhook-4489-markers" for this suite. 
08/24/23 12:52:23.28 ------------------------------ -• [3.146 seconds] -[sig-apps] ReplicationController -test/e2e/apps/framework.go:23 - should adopt matching pods on creation [Conformance] - test/e2e/apps/rc.go:92 +• [SLOW TEST] [7.799 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should mutate custom resource [Conformance] + test/e2e/apimachinery/webhook.go:291 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] ReplicationController + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:38.788 - Jul 29 16:50:38.788: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replication-controller 07/29/23 16:50:38.791 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:38.824 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:38.826 - [BeforeEach] [sig-apps] ReplicationController + STEP: Creating a kubernetes client 08/24/23 12:52:15.509 + Aug 24 12:52:15.509: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 12:52:15.511 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:15.536 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:15.542 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 - [It] should adopt matching pods on creation [Conformance] - test/e2e/apps/rc.go:92 - STEP: Given a Pod with a 'name' label pod-adoption is created 07/29/23 16:50:38.833 - Jul 29 16:50:38.852: INFO: Waiting up to 5m0s for pod "pod-adoption" in namespace "replication-controller-6698" to be "running and ready" - Jul 29 16:50:38.863: INFO: Pod "pod-adoption": Phase="Pending", Reason="", readiness=false. Elapsed: 10.536091ms - Jul 29 16:50:38.863: INFO: The phase of Pod pod-adoption is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:50:40.873: INFO: Pod "pod-adoption": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.020156097s - Jul 29 16:50:40.873: INFO: The phase of Pod pod-adoption is Running (Ready = true) - Jul 29 16:50:40.873: INFO: Pod "pod-adoption" satisfied condition "running and ready" - STEP: When a replication controller with a matching selector is created 07/29/23 16:50:40.88 - STEP: Then the orphan pod is adopted 07/29/23 16:50:40.892 - [AfterEach] [sig-apps] ReplicationController + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 12:52:15.576 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:52:16.713 + STEP: Deploying the webhook pod 08/24/23 12:52:16.724 + STEP: Wait for the deployment to be ready 08/24/23 12:52:16.745 + Aug 24 12:52:16.757: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created + STEP: Deploying the webhook service 08/24/23 12:52:18.775 + STEP: Verifying the service has paired with the endpoint 08/24/23 12:52:18.814 + Aug 24 12:52:19.816: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should mutate custom resource [Conformance] + test/e2e/apimachinery/webhook.go:291 + Aug 24 12:52:19.824: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Registering the mutating webhook for custom resource e2e-test-webhook-7273-crds.webhook.example.com via the AdmissionRegistration API 08/24/23 12:52:20.351 + STEP: Creating a custom resource that should be mutated by the webhook 08/24/23 12:52:20.405 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:41.912: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] ReplicationController + Aug 24 12:52:23.139: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] ReplicationController + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] ReplicationController + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "replication-controller-6698" for this suite. 07/29/23 16:50:41.922 + STEP: Destroying namespace "webhook-4489" for this suite. 08/24/23 12:52:23.268 + STEP: Destroying namespace "webhook-4489-markers" for this suite. 
08/24/23 12:52:23.28 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSS +SSSSS ------------------------------ -[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - should be able to convert from CR v1 to CR v2 [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:149 -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:89 +[BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:41.937 -Jul 29 16:50:41.937: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-webhook 07/29/23 16:50:41.941 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:41.975 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:41.981 -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:52:23.308 +Aug 24 12:52:23.309: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:52:23.326 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:23.374 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:23.387 +[BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:128 -STEP: Setting up server cert 07/29/23 16:50:41.987 -STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication 07/29/23 16:50:42.353 -STEP: Deploying the custom resource conversion webhook pod 07/29/23 16:50:42.372 -STEP: Wait for the deployment to be ready 07/29/23 16:50:42.389 -Jul 29 16:50:42.398: INFO: new replicaset for deployment "sample-crd-conversion-webhook-deployment" is yet to be created -STEP: Deploying the webhook service 07/29/23 16:50:44.42 -STEP: Verifying the service has paired with the endpoint 07/29/23 16:50:44.439 -Jul 29 16:50:45.439: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 -[It] should be able to convert from CR v1 to CR v2 [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:149 -Jul 29 16:50:45.449: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Creating a v1 custom resource 07/29/23 16:50:48.384 -STEP: v2 custom resource should be converted 07/29/23 16:50:48.4 -[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:89 +STEP: Creating configMap with name projected-configmap-test-volume-map-e9a6a897-7fb4-40b2-aa46-1d0155180810 08/24/23 12:52:23.392 +STEP: Creating a pod to test consume configMaps 08/24/23 12:52:23.402 +Aug 24 12:52:23.415: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6" in namespace "projected-4971" to be "Succeeded or Failed" +Aug 24 12:52:23.423: INFO: Pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6": Phase="Pending", Reason="", 
readiness=false. Elapsed: 8.202365ms +Aug 24 12:52:25.431: INFO: Pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016161998s +Aug 24 12:52:27.437: INFO: Pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02250166s +STEP: Saw pod success 08/24/23 12:52:27.438 +Aug 24 12:52:27.438: INFO: Pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6" satisfied condition "Succeeded or Failed" +Aug 24 12:52:27.447: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6 container agnhost-container: +STEP: delete the pod 08/24/23 12:52:27.457 +Aug 24 12:52:27.485: INFO: Waiting for pod pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6 to disappear +Aug 24 12:52:27.492: INFO: Pod pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6 no longer exists +[AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:48.941: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:139 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +Aug 24 12:52:27.493: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 -STEP: Destroying namespace "crd-webhook-3655" for this suite. 07/29/23 16:50:49.067 +STEP: Destroying namespace "projected-4971" for this suite. 
08/24/23 12:52:27.501 ------------------------------ -• [SLOW TEST] [7.185 seconds] -[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should be able to convert from CR v1 to CR v2 [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:149 +• [4.203 seconds] +[sig-storage] Projected configMap +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:89 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:41.937 - Jul 29 16:50:41.937: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-webhook 07/29/23 16:50:41.941 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:41.975 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:41.981 - [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:52:23.308 + Aug 24 12:52:23.309: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:52:23.326 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:23.374 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:23.387 + [BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:128 - STEP: Setting up server cert 07/29/23 16:50:41.987 - STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication 07/29/23 16:50:42.353 - STEP: Deploying the custom resource conversion webhook pod 07/29/23 16:50:42.372 - STEP: Wait for the deployment to be ready 07/29/23 16:50:42.389 - Jul 29 16:50:42.398: INFO: new replicaset for deployment "sample-crd-conversion-webhook-deployment" is yet to be created - STEP: Deploying the webhook service 07/29/23 16:50:44.42 - STEP: Verifying the service has paired with the endpoint 07/29/23 16:50:44.439 - Jul 29 16:50:45.439: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 - [It] should be able to convert from CR v1 to CR v2 [Conformance] - test/e2e/apimachinery/crd_conversion_webhook.go:149 - Jul 29 16:50:45.449: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Creating a v1 custom resource 07/29/23 16:50:48.384 - STEP: v2 custom resource should be converted 07/29/23 16:50:48.4 - [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + [It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:89 + STEP: Creating configMap with name projected-configmap-test-volume-map-e9a6a897-7fb4-40b2-aa46-1d0155180810 08/24/23 12:52:23.392 + STEP: Creating a pod to test consume configMaps 08/24/23 12:52:23.402 + Aug 24 12:52:23.415: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6" in namespace "projected-4971" to be "Succeeded or Failed" + Aug 24 
12:52:23.423: INFO: Pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6": Phase="Pending", Reason="", readiness=false. Elapsed: 8.202365ms + Aug 24 12:52:25.431: INFO: Pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016161998s + Aug 24 12:52:27.437: INFO: Pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02250166s + STEP: Saw pod success 08/24/23 12:52:27.438 + Aug 24 12:52:27.438: INFO: Pod "pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6" satisfied condition "Succeeded or Failed" + Aug 24 12:52:27.447: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6 container agnhost-container: + STEP: delete the pod 08/24/23 12:52:27.457 + Aug 24 12:52:27.485: INFO: Waiting for pod pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6 to disappear + Aug 24 12:52:27.492: INFO: Pod pod-projected-configmaps-6a2479f2-f422-4763-85ba-d15f866bdce6 no longer exists + [AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:48.941: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/crd_conversion_webhook.go:139 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + Aug 24 12:52:27.493: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 - STEP: Destroying namespace "crd-webhook-3655" for this suite. 07/29/23 16:50:49.067 + STEP: Destroying namespace "projected-4971" for this suite. 
08/24/23 12:52:27.501 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSS ------------------------------ -[sig-node] RuntimeClass - should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:129 -[BeforeEach] [sig-node] RuntimeClass +[sig-api-machinery] Namespaces [Serial] + should ensure that all pods are removed when a namespace is deleted [Conformance] + test/e2e/apimachinery/namespace.go:243 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:49.13 -Jul 29 16:50:49.130: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename runtimeclass 07/29/23 16:50:49.143 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:49.177 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:49.188 -[BeforeEach] [sig-node] RuntimeClass +STEP: Creating a kubernetes client 08/24/23 12:52:27.512 +Aug 24 12:52:27.512: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename namespaces 08/24/23 12:52:27.515 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:27.564 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:27.57 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:129 -Jul 29 16:50:49.237: INFO: Waiting up to 1m20s for at least 1 pods in namespace runtimeclass-6747 to be scheduled -Jul 29 16:50:49.246: INFO: 1 pods are not scheduled: [runtimeclass-6747/test-runtimeclass-runtimeclass-6747-preconfigured-handler-z29pb(bbe3a8bc-1497-407d-a70e-d0b329a0db5f)] -[AfterEach] [sig-node] RuntimeClass +[It] should ensure that all pods are removed when a namespace is deleted [Conformance] + test/e2e/apimachinery/namespace.go:243 +STEP: Creating a test namespace 08/24/23 12:52:27.574 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:27.598 +STEP: Creating a pod in the namespace 08/24/23 12:52:27.603 +STEP: Waiting for the pod to have running status 08/24/23 12:52:27.615 +Aug 24 12:52:27.615: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "nsdeletetest-2593" to be "running" +Aug 24 12:52:27.624: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 8.717765ms +Aug 24 12:52:29.633: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.017907745s +Aug 24 12:52:29.634: INFO: Pod "test-pod" satisfied condition "running" +STEP: Deleting the namespace 08/24/23 12:52:29.634 +STEP: Waiting for the namespace to be removed. 
08/24/23 12:52:29.648 +STEP: Recreating the namespace 08/24/23 12:52:40.655 +STEP: Verifying there are no pods in the namespace 08/24/23 12:52:40.688 +[AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:51.265: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] RuntimeClass +Aug 24 12:52:40.701: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] RuntimeClass +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] RuntimeClass +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "runtimeclass-6747" for this suite. 07/29/23 16:50:51.275 +STEP: Destroying namespace "namespaces-2896" for this suite. 08/24/23 12:52:40.711 +STEP: Destroying namespace "nsdeletetest-2593" for this suite. 08/24/23 12:52:40.724 +Aug 24 12:52:40.736: INFO: Namespace nsdeletetest-2593 was already deleted +STEP: Destroying namespace "nsdeletetest-9708" for this suite. 08/24/23 12:52:40.736 ------------------------------ -• [2.156 seconds] -[sig-node] RuntimeClass -test/e2e/common/node/framework.go:23 - should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:129 +• [SLOW TEST] [13.240 seconds] +[sig-api-machinery] Namespaces [Serial] +test/e2e/apimachinery/framework.go:23 + should ensure that all pods are removed when a namespace is deleted [Conformance] + test/e2e/apimachinery/namespace.go:243 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] RuntimeClass + [BeforeEach] [sig-api-machinery] Namespaces [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:49.13 - Jul 29 16:50:49.130: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename runtimeclass 07/29/23 16:50:49.143 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:49.177 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:49.188 - [BeforeEach] [sig-node] RuntimeClass + STEP: Creating a kubernetes client 08/24/23 12:52:27.512 + Aug 24 12:52:27.512: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename namespaces 08/24/23 12:52:27.515 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:27.564 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:27.57 + [BeforeEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] - test/e2e/common/node/runtimeclass.go:129 - Jul 29 16:50:49.237: INFO: Waiting up to 1m20s for at least 1 pods in namespace runtimeclass-6747 to be scheduled - Jul 29 16:50:49.246: INFO: 1 pods are not scheduled: [runtimeclass-6747/test-runtimeclass-runtimeclass-6747-preconfigured-handler-z29pb(bbe3a8bc-1497-407d-a70e-d0b329a0db5f)] - [AfterEach] [sig-node] RuntimeClass + [It] should ensure that all pods are removed when a namespace is deleted [Conformance] + test/e2e/apimachinery/namespace.go:243 + STEP: Creating a test namespace 08/24/23 
12:52:27.574 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:27.598 + STEP: Creating a pod in the namespace 08/24/23 12:52:27.603 + STEP: Waiting for the pod to have running status 08/24/23 12:52:27.615 + Aug 24 12:52:27.615: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "nsdeletetest-2593" to be "running" + Aug 24 12:52:27.624: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 8.717765ms + Aug 24 12:52:29.633: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.017907745s + Aug 24 12:52:29.634: INFO: Pod "test-pod" satisfied condition "running" + STEP: Deleting the namespace 08/24/23 12:52:29.634 + STEP: Waiting for the namespace to be removed. 08/24/23 12:52:29.648 + STEP: Recreating the namespace 08/24/23 12:52:40.655 + STEP: Verifying there are no pods in the namespace 08/24/23 12:52:40.688 + [AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:51.265: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] RuntimeClass + Aug 24 12:52:40.701: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] RuntimeClass + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "runtimeclass-6747" for this suite. 07/29/23 16:50:51.275 + STEP: Destroying namespace "namespaces-2896" for this suite. 08/24/23 12:52:40.711 + STEP: Destroying namespace "nsdeletetest-2593" for this suite. 08/24/23 12:52:40.724 + Aug 24 12:52:40.736: INFO: Namespace nsdeletetest-2593 was already deleted + STEP: Destroying namespace "nsdeletetest-9708" for this suite. 
08/24/23 12:52:40.736 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:375 -[BeforeEach] [sig-storage] Projected configMap +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + patching/updating a mutating webhook should work [Conformance] + test/e2e/apimachinery/webhook.go:508 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:51.292 -Jul 29 16:50:51.293: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:50:51.296 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:51.326 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:51.33 -[BeforeEach] [sig-storage] Projected configMap +STEP: Creating a kubernetes client 08/24/23 12:52:40.768 +Aug 24 12:52:40.768: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 12:52:40.772 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:40.806 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:40.813 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:375 -STEP: Creating configMap with name projected-configmap-test-volume-ebdddc9b-ffd3-4ff8-89f3-fd3189119140 07/29/23 16:50:51.334 -STEP: Creating a pod to test consume configMaps 07/29/23 16:50:51.34 -Jul 29 16:50:51.357: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f" in namespace "projected-5416" to be "Succeeded or Failed" -Jul 29 16:50:51.374: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f": Phase="Pending", Reason="", readiness=false. Elapsed: 16.431703ms -Jul 29 16:50:53.390: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f": Phase="Running", Reason="", readiness=true. Elapsed: 2.032800628s -Jul 29 16:50:55.383: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f": Phase="Running", Reason="", readiness=false. Elapsed: 4.025632528s -Jul 29 16:50:57.380: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.02271432s -STEP: Saw pod success 07/29/23 16:50:57.38 -Jul 29 16:50:57.380: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f" satisfied condition "Succeeded or Failed" -Jul 29 16:50:57.386: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f container projected-configmap-volume-test: -STEP: delete the pod 07/29/23 16:50:57.408 -Jul 29 16:50:57.450: INFO: Waiting for pod pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f to disappear -Jul 29 16:50:57.457: INFO: Pod pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f no longer exists -[AfterEach] [sig-storage] Projected configMap +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 12:52:40.846 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:52:41.297 +STEP: Deploying the webhook pod 08/24/23 12:52:41.312 +STEP: Wait for the deployment to be ready 08/24/23 12:52:41.334 +Aug 24 12:52:41.349: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service 08/24/23 12:52:43.38 +STEP: Verifying the service has paired with the endpoint 08/24/23 12:52:43.402 +Aug 24 12:52:44.404: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] patching/updating a mutating webhook should work [Conformance] + test/e2e/apimachinery/webhook.go:508 +STEP: Creating a mutating webhook configuration 08/24/23 12:52:44.418 +STEP: Updating a mutating webhook configuration's rules to not include the create operation 08/24/23 12:52:44.468 +STEP: Creating a configMap that should not be mutated 08/24/23 12:52:44.48 +STEP: Patching a mutating webhook configuration's rules to include the create operation 08/24/23 12:52:44.496 +STEP: Creating a configMap that should be mutated 08/24/23 12:52:44.511 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:50:57.457: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected configMap +Aug 24 12:52:44.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "projected-5416" for this suite. 07/29/23 16:50:57.466 +STEP: Destroying namespace "webhook-1332" for this suite. 08/24/23 12:52:44.663 +STEP: Destroying namespace "webhook-1332-markers" for this suite. 
08/24/23 12:52:44.682 ------------------------------ -• [SLOW TEST] [6.185 seconds] -[sig-storage] Projected configMap -test/e2e/common/storage/framework.go:23 - should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:375 +• [3.928 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + patching/updating a mutating webhook should work [Conformance] + test/e2e/apimachinery/webhook.go:508 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected configMap + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:51.292 - Jul 29 16:50:51.293: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:50:51.296 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:51.326 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:51.33 - [BeforeEach] [sig-storage] Projected configMap + STEP: Creating a kubernetes client 08/24/23 12:52:40.768 + Aug 24 12:52:40.768: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 12:52:40.772 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:40.806 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:40.813 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:375 - STEP: Creating configMap with name projected-configmap-test-volume-ebdddc9b-ffd3-4ff8-89f3-fd3189119140 07/29/23 16:50:51.334 - STEP: Creating a pod to test consume configMaps 07/29/23 16:50:51.34 - Jul 29 16:50:51.357: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f" in namespace "projected-5416" to be "Succeeded or Failed" - Jul 29 16:50:51.374: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f": Phase="Pending", Reason="", readiness=false. Elapsed: 16.431703ms - Jul 29 16:50:53.390: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f": Phase="Running", Reason="", readiness=true. Elapsed: 2.032800628s - Jul 29 16:50:55.383: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f": Phase="Running", Reason="", readiness=false. Elapsed: 4.025632528s - Jul 29 16:50:57.380: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.02271432s - STEP: Saw pod success 07/29/23 16:50:57.38 - Jul 29 16:50:57.380: INFO: Pod "pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f" satisfied condition "Succeeded or Failed" - Jul 29 16:50:57.386: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f container projected-configmap-volume-test: - STEP: delete the pod 07/29/23 16:50:57.408 - Jul 29 16:50:57.450: INFO: Waiting for pod pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f to disappear - Jul 29 16:50:57.457: INFO: Pod pod-projected-configmaps-73908662-1105-48e7-980d-aa2c43d9887f no longer exists - [AfterEach] [sig-storage] Projected configMap + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 12:52:40.846 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:52:41.297 + STEP: Deploying the webhook pod 08/24/23 12:52:41.312 + STEP: Wait for the deployment to be ready 08/24/23 12:52:41.334 + Aug 24 12:52:41.349: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set + STEP: Deploying the webhook service 08/24/23 12:52:43.38 + STEP: Verifying the service has paired with the endpoint 08/24/23 12:52:43.402 + Aug 24 12:52:44.404: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] patching/updating a mutating webhook should work [Conformance] + test/e2e/apimachinery/webhook.go:508 + STEP: Creating a mutating webhook configuration 08/24/23 12:52:44.418 + STEP: Updating a mutating webhook configuration's rules to not include the create operation 08/24/23 12:52:44.468 + STEP: Creating a configMap that should not be mutated 08/24/23 12:52:44.48 + STEP: Patching a mutating webhook configuration's rules to include the create operation 08/24/23 12:52:44.496 + STEP: Creating a configMap that should be mutated 08/24/23 12:52:44.511 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:50:57.457: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected configMap + Aug 24 12:52:44.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "projected-5416" for this suite. 07/29/23 16:50:57.466 + STEP: Destroying namespace "webhook-1332" for this suite. 08/24/23 12:52:44.663 + STEP: Destroying namespace "webhook-1332-markers" for this suite. 
08/24/23 12:52:44.682 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSS ------------------------------ -[sig-apps] Daemon set [Serial] - should rollback without unnecessary restarts [Conformance] - test/e2e/apps/daemon_set.go:443 -[BeforeEach] [sig-apps] Daemon set [Serial] +[sig-node] Variable Expansion + should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] + test/e2e/common/node/expansion.go:225 +[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:50:57.483 -Jul 29 16:50:57.483: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename daemonsets 07/29/23 16:50:57.487 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:57.522 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:57.537 -[BeforeEach] [sig-apps] Daemon set [Serial] +STEP: Creating a kubernetes client 08/24/23 12:52:44.703 +Aug 24 12:52:44.705: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename var-expansion 08/24/23 12:52:44.712 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:44.74 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:44.746 +[BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 -[It] should rollback without unnecessary restarts [Conformance] - test/e2e/apps/daemon_set.go:443 -Jul 29 16:50:57.599: INFO: Create a RollingUpdate DaemonSet -Jul 29 16:50:57.610: INFO: Check that daemon pods launch on every node of the cluster -Jul 29 16:50:57.621: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:50:57.622: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:50:58.645: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:50:58.646: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:50:59.640: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 -Jul 29 16:50:59.640: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set -Jul 29 16:50:59.640: INFO: Update the DaemonSet to trigger a rollout -Jul 29 16:50:59.656: INFO: Updating DaemonSet daemon-set -Jul 29 16:51:02.732: INFO: Roll back the DaemonSet before rollout is complete -Jul 29 16:51:02.754: INFO: Updating DaemonSet daemon-set -Jul 29 16:51:02.754: INFO: Make sure DaemonSet rollback is complete -Jul 29 16:51:02.767: INFO: Wrong image for pod: daemon-set-5g4sm. Expected: registry.k8s.io/e2e-test-images/httpd:2.4.38-4, got: foo:non-existent. 
-Jul 29 16:51:02.767: INFO: Pod daemon-set-5g4sm is not available -Jul 29 16:51:08.787: INFO: Pod daemon-set-gjgcq is not available -[AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 -STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:51:08.817 -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-5773, will wait for the garbage collector to delete the pods 07/29/23 16:51:08.817 -Jul 29 16:51:08.903: INFO: Deleting DaemonSet.extensions daemon-set took: 24.289978ms -Jul 29 16:51:09.104: INFO: Terminating DaemonSet.extensions daemon-set pods took: 200.922011ms -Jul 29 16:51:11.618: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:51:11.618: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set -Jul 29 16:51:11.623: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"33326"},"items":null} - -Jul 29 16:51:11.627: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"33326"},"items":null} - -[AfterEach] [sig-apps] Daemon set [Serial] +[It] should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] + test/e2e/common/node/expansion.go:225 +STEP: creating the pod with failed condition 08/24/23 12:52:44.752 +Aug 24 12:52:44.773: INFO: Waiting up to 2m0s for pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" in namespace "var-expansion-3288" to be "running" +Aug 24 12:52:44.786: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 12.467141ms +Aug 24 12:52:46.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019810537s +Aug 24 12:52:48.799: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 4.026225847s +Aug 24 12:52:50.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 6.020565347s +Aug 24 12:52:52.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 8.022989172s +Aug 24 12:52:54.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 10.023358783s +Aug 24 12:52:56.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 12.023619186s +Aug 24 12:52:58.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 14.019307125s +Aug 24 12:53:00.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 16.0232011s +Aug 24 12:53:02.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 18.020031934s +Aug 24 12:53:04.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 20.019842395s +Aug 24 12:53:06.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 22.01894307s +Aug 24 12:53:08.800: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. 
Elapsed: 24.027159788s +Aug 24 12:53:10.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 26.022278012s +Aug 24 12:53:12.799: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 28.025488579s +Aug 24 12:53:14.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 30.022333889s +Aug 24 12:53:16.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 32.020223641s +Aug 24 12:53:18.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 34.02415456s +Aug 24 12:53:20.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 36.019614186s +Aug 24 12:53:22.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 38.019275248s +Aug 24 12:53:24.801: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 40.028021342s +Aug 24 12:53:26.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 42.02104119s +Aug 24 12:53:28.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 44.023071981s +Aug 24 12:53:30.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 46.02047384s +Aug 24 12:53:32.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 48.022019974s +Aug 24 12:53:34.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 50.019897165s +Aug 24 12:53:36.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 52.022183457s +Aug 24 12:53:38.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 54.021512327s +Aug 24 12:53:40.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 56.022238419s +Aug 24 12:53:42.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 58.022937322s +Aug 24 12:53:44.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.020587458s +Aug 24 12:53:46.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.022707793s +Aug 24 12:53:48.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.022384391s +Aug 24 12:53:50.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.023881691s +Aug 24 12:53:52.799: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m8.025673235s +Aug 24 12:53:54.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m10.018430102s +Aug 24 12:53:56.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.020145911s +Aug 24 12:53:58.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.020787287s +Aug 24 12:54:00.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.022844591s +Aug 24 12:54:02.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.020904018s +Aug 24 12:54:04.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.021419708s +Aug 24 12:54:06.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.019310772s +Aug 24 12:54:08.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.022434757s +Aug 24 12:54:10.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.022073342s +Aug 24 12:54:12.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.021331326s +Aug 24 12:54:14.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.021009243s +Aug 24 12:54:16.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.020303635s +Aug 24 12:54:18.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.024205867s +Aug 24 12:54:20.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.020891064s +Aug 24 12:54:22.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.019483089s +Aug 24 12:54:24.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.02119001s +Aug 24 12:54:26.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.019918064s +Aug 24 12:54:28.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.020838882s +Aug 24 12:54:30.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.018921894s +Aug 24 12:54:32.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.020751858s +Aug 24 12:54:34.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.02179022s +Aug 24 12:54:36.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m52.020157686s +Aug 24 12:54:38.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m54.022010292s +Aug 24 12:54:40.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.022617565s +Aug 24 12:54:42.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.019907627s +Aug 24 12:54:44.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.022017979s +Aug 24 12:54:44.801: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.028212831s +STEP: updating the pod 08/24/23 12:54:44.802 +Aug 24 12:54:45.325: INFO: Successfully updated pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" +STEP: waiting for pod running 08/24/23 12:54:45.325 +Aug 24 12:54:45.325: INFO: Waiting up to 2m0s for pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" in namespace "var-expansion-3288" to be "running" +Aug 24 12:54:45.337: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 11.857727ms +Aug 24 12:54:47.348: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Running", Reason="", readiness=true. Elapsed: 2.02248608s +Aug 24 12:54:47.348: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" satisfied condition "running" +STEP: deleting the pod gracefully 08/24/23 12:54:47.348 +Aug 24 12:54:47.349: INFO: Deleting pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" in namespace "var-expansion-3288" +Aug 24 12:54:47.369: INFO: Wait up to 5m0s for pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" to be fully deleted +[AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:11.667: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] +Aug 24 12:55:19.439: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] +[DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] +[DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 -STEP: Destroying namespace "daemonsets-5773" for this suite. 07/29/23 16:51:11.709 +STEP: Destroying namespace "var-expansion-3288" for this suite. 
08/24/23 12:55:19.458 ------------------------------ -• [SLOW TEST] [14.239 seconds] -[sig-apps] Daemon set [Serial] -test/e2e/apps/framework.go:23 - should rollback without unnecessary restarts [Conformance] - test/e2e/apps/daemon_set.go:443 +• [SLOW TEST] [154.777 seconds] +[sig-node] Variable Expansion +test/e2e/common/node/framework.go:23 + should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] + test/e2e/common/node/expansion.go:225 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Daemon set [Serial] + [BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:50:57.483 - Jul 29 16:50:57.483: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename daemonsets 07/29/23 16:50:57.487 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:50:57.522 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:50:57.537 - [BeforeEach] [sig-apps] Daemon set [Serial] + STEP: Creating a kubernetes client 08/24/23 12:52:44.703 + Aug 24 12:52:44.705: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename var-expansion 08/24/23 12:52:44.712 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:52:44.74 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:52:44.746 + [BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:157 - [It] should rollback without unnecessary restarts [Conformance] - test/e2e/apps/daemon_set.go:443 - Jul 29 16:50:57.599: INFO: Create a RollingUpdate DaemonSet - Jul 29 16:50:57.610: INFO: Check that daemon pods launch on every node of the cluster - Jul 29 16:50:57.621: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:50:57.622: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:50:58.645: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:50:58.646: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 - Jul 29 16:50:59.640: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 - Jul 29 16:50:59.640: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set - Jul 29 16:50:59.640: INFO: Update the DaemonSet to trigger a rollout - Jul 29 16:50:59.656: INFO: Updating DaemonSet daemon-set - Jul 29 16:51:02.732: INFO: Roll back the DaemonSet before rollout is complete - Jul 29 16:51:02.754: INFO: Updating DaemonSet daemon-set - Jul 29 16:51:02.754: INFO: Make sure DaemonSet rollback is complete - Jul 29 16:51:02.767: INFO: Wrong image for pod: daemon-set-5g4sm. Expected: registry.k8s.io/e2e-test-images/httpd:2.4.38-4, got: foo:non-existent. 
- Jul 29 16:51:02.767: INFO: Pod daemon-set-5g4sm is not available - Jul 29 16:51:08.787: INFO: Pod daemon-set-gjgcq is not available - [AfterEach] [sig-apps] Daemon set [Serial] - test/e2e/apps/daemon_set.go:122 - STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:51:08.817 - STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-5773, will wait for the garbage collector to delete the pods 07/29/23 16:51:08.817 - Jul 29 16:51:08.903: INFO: Deleting DaemonSet.extensions daemon-set took: 24.289978ms - Jul 29 16:51:09.104: INFO: Terminating DaemonSet.extensions daemon-set pods took: 200.922011ms - Jul 29 16:51:11.618: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 - Jul 29 16:51:11.618: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set - Jul 29 16:51:11.623: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"33326"},"items":null} - - Jul 29 16:51:11.627: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"33326"},"items":null} - - [AfterEach] [sig-apps] Daemon set [Serial] + [It] should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] + test/e2e/common/node/expansion.go:225 + STEP: creating the pod with failed condition 08/24/23 12:52:44.752 + Aug 24 12:52:44.773: INFO: Waiting up to 2m0s for pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" in namespace "var-expansion-3288" to be "running" + Aug 24 12:52:44.786: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 12.467141ms + Aug 24 12:52:46.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019810537s + Aug 24 12:52:48.799: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 4.026225847s + Aug 24 12:52:50.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 6.020565347s + Aug 24 12:52:52.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 8.022989172s + Aug 24 12:52:54.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 10.023358783s + Aug 24 12:52:56.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 12.023619186s + Aug 24 12:52:58.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 14.019307125s + Aug 24 12:53:00.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 16.0232011s + Aug 24 12:53:02.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 18.020031934s + Aug 24 12:53:04.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 20.019842395s + Aug 24 12:53:06.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 22.01894307s + Aug 24 12:53:08.800: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. 
Elapsed: 24.027159788s + Aug 24 12:53:10.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 26.022278012s + Aug 24 12:53:12.799: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 28.025488579s + Aug 24 12:53:14.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 30.022333889s + Aug 24 12:53:16.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 32.020223641s + Aug 24 12:53:18.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 34.02415456s + Aug 24 12:53:20.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 36.019614186s + Aug 24 12:53:22.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 38.019275248s + Aug 24 12:53:24.801: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 40.028021342s + Aug 24 12:53:26.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 42.02104119s + Aug 24 12:53:28.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 44.023071981s + Aug 24 12:53:30.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 46.02047384s + Aug 24 12:53:32.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 48.022019974s + Aug 24 12:53:34.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 50.019897165s + Aug 24 12:53:36.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 52.022183457s + Aug 24 12:53:38.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 54.021512327s + Aug 24 12:53:40.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 56.022238419s + Aug 24 12:53:42.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 58.022937322s + Aug 24 12:53:44.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m0.020587458s + Aug 24 12:53:46.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m2.022707793s + Aug 24 12:53:48.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m4.022384391s + Aug 24 12:53:50.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m6.023881691s + Aug 24 12:53:52.799: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m8.025673235s + Aug 24 12:53:54.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m10.018430102s + Aug 24 12:53:56.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m12.020145911s + Aug 24 12:53:58.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m14.020787287s + Aug 24 12:54:00.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m16.022844591s + Aug 24 12:54:02.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m18.020904018s + Aug 24 12:54:04.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m20.021419708s + Aug 24 12:54:06.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m22.019310772s + Aug 24 12:54:08.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m24.022434757s + Aug 24 12:54:10.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m26.022073342s + Aug 24 12:54:12.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m28.021331326s + Aug 24 12:54:14.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m30.021009243s + Aug 24 12:54:16.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m32.020303635s + Aug 24 12:54:18.797: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m34.024205867s + Aug 24 12:54:20.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m36.020891064s + Aug 24 12:54:22.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m38.019483089s + Aug 24 12:54:24.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m40.02119001s + Aug 24 12:54:26.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m42.019918064s + Aug 24 12:54:28.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m44.020838882s + Aug 24 12:54:30.792: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m46.018921894s + Aug 24 12:54:32.794: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m48.020751858s + Aug 24 12:54:34.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m50.02179022s + Aug 24 12:54:36.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. 
Elapsed: 1m52.020157686s + Aug 24 12:54:38.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m54.022010292s + Aug 24 12:54:40.796: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m56.022617565s + Aug 24 12:54:42.793: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 1m58.019907627s + Aug 24 12:54:44.795: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.022017979s + Aug 24 12:54:44.801: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 2m0.028212831s + STEP: updating the pod 08/24/23 12:54:44.802 + Aug 24 12:54:45.325: INFO: Successfully updated pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" + STEP: waiting for pod running 08/24/23 12:54:45.325 + Aug 24 12:54:45.325: INFO: Waiting up to 2m0s for pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" in namespace "var-expansion-3288" to be "running" + Aug 24 12:54:45.337: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Pending", Reason="", readiness=false. Elapsed: 11.857727ms + Aug 24 12:54:47.348: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7": Phase="Running", Reason="", readiness=true. Elapsed: 2.02248608s + Aug 24 12:54:47.348: INFO: Pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" satisfied condition "running" + STEP: deleting the pod gracefully 08/24/23 12:54:47.348 + Aug 24 12:54:47.349: INFO: Deleting pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" in namespace "var-expansion-3288" + Aug 24 12:54:47.369: INFO: Wait up to 5m0s for pod "var-expansion-5b8771d2-fa4d-4e94-bf6e-0678636055c7" to be fully deleted + [AfterEach] [sig-node] Variable Expansion test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:11.667: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + Aug 24 12:55:19.439: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-node] Variable Expansion dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] + [DeferCleanup (Each)] [sig-node] Variable Expansion tear down framework | framework.go:193 - STEP: Destroying namespace "daemonsets-5773" for this suite. 07/29/23 16:51:11.709 + STEP: Destroying namespace "var-expansion-3288" for this suite. 
08/24/23 12:55:19.458 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSS +SSSS ------------------------------ -[sig-network] Networking Granular Checks: Pods - should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:105 -[BeforeEach] [sig-network] Networking +[sig-node] Security Context When creating a container with runAsUser + should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:347 +[BeforeEach] [sig-node] Security Context set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:11.727 -Jul 29 16:51:11.728: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pod-network-test 07/29/23 16:51:11.731 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:11.762 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:11.766 -[BeforeEach] [sig-network] Networking +STEP: Creating a kubernetes client 08/24/23 12:55:19.485 +Aug 24 12:55:19.485: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename security-context-test 08/24/23 12:55:19.489 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:19.533 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:19.54 +[BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 -[It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:105 -STEP: Performing setup for networking test in namespace pod-network-test-9504 07/29/23 16:51:11.771 -STEP: creating a selector 07/29/23 16:51:11.771 -STEP: Creating the service pods in kubernetes 07/29/23 16:51:11.772 -Jul 29 16:51:11.772: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable -Jul 29 16:51:11.938: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-9504" to be "running and ready" -Jul 29 16:51:11.959: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 21.099126ms -Jul 29 16:51:11.960: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:51:13.969: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.031256136s -Jul 29 16:51:13.970: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:51:15.968: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.029755364s -Jul 29 16:51:15.968: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:51:17.974: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.036018131s -Jul 29 16:51:17.974: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:51:19.968: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.029624528s -Jul 29 16:51:19.968: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:51:21.968: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.029525099s -Jul 29 16:51:21.968: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:51:23.969: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. 
Elapsed: 12.030783994s -Jul 29 16:51:23.969: INFO: The phase of Pod netserver-0 is Running (Ready = true) -Jul 29 16:51:23.969: INFO: Pod "netserver-0" satisfied condition "running and ready" -Jul 29 16:51:23.976: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-9504" to be "running and ready" -Jul 29 16:51:23.982: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 6.553706ms -Jul 29 16:51:23.982: INFO: The phase of Pod netserver-1 is Running (Ready = true) -Jul 29 16:51:23.982: INFO: Pod "netserver-1" satisfied condition "running and ready" -Jul 29 16:51:23.987: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-9504" to be "running and ready" -Jul 29 16:51:23.992: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 4.854389ms -Jul 29 16:51:23.992: INFO: The phase of Pod netserver-2 is Running (Ready = true) -Jul 29 16:51:23.992: INFO: Pod "netserver-2" satisfied condition "running and ready" -STEP: Creating test pods 07/29/23 16:51:23.996 -Jul 29 16:51:24.055: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-9504" to be "running" -Jul 29 16:51:24.064: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 8.4938ms -Jul 29 16:51:26.078: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.022246707s -Jul 29 16:51:26.078: INFO: Pod "test-container-pod" satisfied condition "running" -Jul 29 16:51:26.084: INFO: Waiting up to 5m0s for pod "host-test-container-pod" in namespace "pod-network-test-9504" to be "running" -Jul 29 16:51:26.090: INFO: Pod "host-test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 5.885475ms -Jul 29 16:51:26.090: INFO: Pod "host-test-container-pod" satisfied condition "running" -Jul 29 16:51:26.095: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 -Jul 29 16:51:26.095: INFO: Going to poll 10.233.64.54 on port 8083 at least 0 times, with a maximum of 39 tries before failing -Jul 29 16:51:26.103: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.64.54:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9504 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:51:26.103: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:51:26.106: INFO: ExecWithOptions: Clientset creation -Jul 29 16:51:26.106: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9504/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.64.54%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) -Jul 29 16:51:26.246: INFO: Found all 1 expected endpoints: [netserver-0] -Jul 29 16:51:26.247: INFO: Going to poll 10.233.65.132 on port 8083 at least 0 times, with a maximum of 39 tries before failing -Jul 29 16:51:26.254: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.65.132:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9504 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:51:26.254: INFO: 
>>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:51:26.256: INFO: ExecWithOptions: Clientset creation -Jul 29 16:51:26.256: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9504/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.65.132%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) -Jul 29 16:51:26.344: INFO: Found all 1 expected endpoints: [netserver-1] -Jul 29 16:51:26.345: INFO: Going to poll 10.233.66.135 on port 8083 at least 0 times, with a maximum of 39 tries before failing -Jul 29 16:51:26.351: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.66.135:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9504 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:51:26.351: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:51:26.353: INFO: ExecWithOptions: Clientset creation -Jul 29 16:51:26.353: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9504/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.66.135%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) -Jul 29 16:51:26.458: INFO: Found all 1 expected endpoints: [netserver-2] -[AfterEach] [sig-network] Networking +[BeforeEach] [sig-node] Security Context + test/e2e/common/node/security_context.go:50 +[It] should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:347 +Aug 24 12:55:19.567: INFO: Waiting up to 5m0s for pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6" in namespace "security-context-test-3979" to be "Succeeded or Failed" +Aug 24 12:55:19.577: INFO: Pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6": Phase="Pending", Reason="", readiness=false. Elapsed: 9.50449ms +Aug 24 12:55:21.585: INFO: Pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017863602s +Aug 24 12:55:23.585: INFO: Pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017454298s +Aug 24 12:55:23.585: INFO: Pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6" satisfied condition "Succeeded or Failed" +[AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:26.458: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Networking +Aug 24 12:55:23.585: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Networking +[DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Networking +[DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 -STEP: Destroying namespace "pod-network-test-9504" for this suite. 07/29/23 16:51:26.468 +STEP: Destroying namespace "security-context-test-3979" for this suite. 
08/24/23 12:55:23.594 ------------------------------ -• [SLOW TEST] [14.752 seconds] -[sig-network] Networking -test/e2e/common/network/framework.go:23 - Granular Checks: Pods - test/e2e/common/network/networking.go:32 - should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:105 +• [4.123 seconds] +[sig-node] Security Context +test/e2e/common/node/framework.go:23 + When creating a container with runAsUser + test/e2e/common/node/security_context.go:309 + should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:347 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Networking + [BeforeEach] [sig-node] Security Context set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:11.727 - Jul 29 16:51:11.728: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pod-network-test 07/29/23 16:51:11.731 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:11.762 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:11.766 - [BeforeEach] [sig-network] Networking + STEP: Creating a kubernetes client 08/24/23 12:55:19.485 + Aug 24 12:55:19.485: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename security-context-test 08/24/23 12:55:19.489 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:19.533 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:19.54 + [BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 - [It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:105 - STEP: Performing setup for networking test in namespace pod-network-test-9504 07/29/23 16:51:11.771 - STEP: creating a selector 07/29/23 16:51:11.771 - STEP: Creating the service pods in kubernetes 07/29/23 16:51:11.772 - Jul 29 16:51:11.772: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable - Jul 29 16:51:11.938: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-9504" to be "running and ready" - Jul 29 16:51:11.959: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 21.099126ms - Jul 29 16:51:11.960: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:51:13.969: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.031256136s - Jul 29 16:51:13.970: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:51:15.968: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.029755364s - Jul 29 16:51:15.968: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:51:17.974: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.036018131s - Jul 29 16:51:17.974: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:51:19.968: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.029624528s - Jul 29 16:51:19.968: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:51:21.968: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 10.029525099s - Jul 29 16:51:21.968: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:51:23.969: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 12.030783994s - Jul 29 16:51:23.969: INFO: The phase of Pod netserver-0 is Running (Ready = true) - Jul 29 16:51:23.969: INFO: Pod "netserver-0" satisfied condition "running and ready" - Jul 29 16:51:23.976: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-9504" to be "running and ready" - Jul 29 16:51:23.982: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 6.553706ms - Jul 29 16:51:23.982: INFO: The phase of Pod netserver-1 is Running (Ready = true) - Jul 29 16:51:23.982: INFO: Pod "netserver-1" satisfied condition "running and ready" - Jul 29 16:51:23.987: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-9504" to be "running and ready" - Jul 29 16:51:23.992: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 4.854389ms - Jul 29 16:51:23.992: INFO: The phase of Pod netserver-2 is Running (Ready = true) - Jul 29 16:51:23.992: INFO: Pod "netserver-2" satisfied condition "running and ready" - STEP: Creating test pods 07/29/23 16:51:23.996 - Jul 29 16:51:24.055: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-9504" to be "running" - Jul 29 16:51:24.064: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 8.4938ms - Jul 29 16:51:26.078: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.022246707s - Jul 29 16:51:26.078: INFO: Pod "test-container-pod" satisfied condition "running" - Jul 29 16:51:26.084: INFO: Waiting up to 5m0s for pod "host-test-container-pod" in namespace "pod-network-test-9504" to be "running" - Jul 29 16:51:26.090: INFO: Pod "host-test-container-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 5.885475ms - Jul 29 16:51:26.090: INFO: Pod "host-test-container-pod" satisfied condition "running" - Jul 29 16:51:26.095: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 - Jul 29 16:51:26.095: INFO: Going to poll 10.233.64.54 on port 8083 at least 0 times, with a maximum of 39 tries before failing - Jul 29 16:51:26.103: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.64.54:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9504 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:51:26.103: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:51:26.106: INFO: ExecWithOptions: Clientset creation - Jul 29 16:51:26.106: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9504/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.64.54%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) - Jul 29 16:51:26.246: INFO: Found all 1 expected endpoints: [netserver-0] - Jul 29 16:51:26.247: INFO: Going to poll 10.233.65.132 on port 8083 at least 0 times, with a maximum of 39 tries before failing - Jul 29 16:51:26.254: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.65.132:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9504 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:51:26.254: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:51:26.256: INFO: ExecWithOptions: Clientset creation - Jul 29 16:51:26.256: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9504/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.65.132%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) - Jul 29 16:51:26.344: INFO: Found all 1 expected endpoints: [netserver-1] - Jul 29 16:51:26.345: INFO: Going to poll 10.233.66.135 on port 8083 at least 0 times, with a maximum of 39 tries before failing - Jul 29 16:51:26.351: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.233.66.135:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-9504 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:51:26.351: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:51:26.353: INFO: ExecWithOptions: Clientset creation - Jul 29 16:51:26.353: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-9504/pods/host-test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+--max-time+15+--connect-timeout+1+http%3A%2F%2F10.233.66.135%3A8083%2FhostName+%7C+grep+-v+%27%5E%5Cs%2A%24%27&container=agnhost-container&container=agnhost-container&stderr=true&stdout=true) - Jul 29 16:51:26.458: INFO: Found all 1 expected endpoints: [netserver-2] - [AfterEach] [sig-network] Networking + [BeforeEach] [sig-node] 
Security Context + test/e2e/common/node/security_context.go:50 + [It] should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:347 + Aug 24 12:55:19.567: INFO: Waiting up to 5m0s for pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6" in namespace "security-context-test-3979" to be "Succeeded or Failed" + Aug 24 12:55:19.577: INFO: Pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6": Phase="Pending", Reason="", readiness=false. Elapsed: 9.50449ms + Aug 24 12:55:21.585: INFO: Pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017863602s + Aug 24 12:55:23.585: INFO: Pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017454298s + Aug 24 12:55:23.585: INFO: Pod "busybox-user-65534-39941976-142b-4687-a7a8-4810afcf20b6" satisfied condition "Succeeded or Failed" + [AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:26.458: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Networking + Aug 24 12:55:23.585: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Networking + [DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Networking + [DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 - STEP: Destroying namespace "pod-network-test-9504" for this suite. 07/29/23 16:51:26.468 + STEP: Destroying namespace "security-context-test-3979" for this suite. 
08/24/23 12:55:23.594 << End Captured GinkgoWriter Output ------------------------------ -[sig-node] Containers - should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:59 -[BeforeEach] [sig-node] Containers +SSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Namespaces [Serial] + should ensure that all services are removed when a namespace is deleted [Conformance] + test/e2e/apimachinery/namespace.go:251 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:26.483 -Jul 29 16:51:26.483: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename containers 07/29/23 16:51:26.488 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:26.522 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:26.525 -[BeforeEach] [sig-node] Containers +STEP: Creating a kubernetes client 08/24/23 12:55:23.609 +Aug 24 12:55:23.609: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename namespaces 08/24/23 12:55:23.611 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:23.646 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:23.649 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:59 -STEP: Creating a pod to test override arguments 07/29/23 16:51:26.532 -Jul 29 16:51:26.547: INFO: Waiting up to 5m0s for pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920" in namespace "containers-7734" to be "Succeeded or Failed" -Jul 29 16:51:26.554: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920": Phase="Pending", Reason="", readiness=false. Elapsed: 6.403899ms -Jul 29 16:51:28.566: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018041234s -Jul 29 16:51:30.567: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920": Phase="Pending", Reason="", readiness=false. Elapsed: 4.019508663s -Jul 29 16:51:32.566: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.018763066s -STEP: Saw pod success 07/29/23 16:51:32.567 -Jul 29 16:51:32.567: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920" satisfied condition "Succeeded or Failed" -Jul 29 16:51:32.580: INFO: Trying to get logs from node wetuj3nuajog-3 pod client-containers-d741d384-18d4-410a-95d8-237450ee8920 container agnhost-container: -STEP: delete the pod 07/29/23 16:51:32.662 -Jul 29 16:51:32.698: INFO: Waiting for pod client-containers-d741d384-18d4-410a-95d8-237450ee8920 to disappear -Jul 29 16:51:32.707: INFO: Pod client-containers-d741d384-18d4-410a-95d8-237450ee8920 no longer exists -[AfterEach] [sig-node] Containers +[It] should ensure that all services are removed when a namespace is deleted [Conformance] + test/e2e/apimachinery/namespace.go:251 +STEP: Creating a test namespace 08/24/23 12:55:23.655 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:23.687 +STEP: Creating a service in the namespace 08/24/23 12:55:23.691 +STEP: Deleting the namespace 08/24/23 12:55:23.712 +STEP: Waiting for the namespace to be removed. 08/24/23 12:55:23.736 +STEP: Recreating the namespace 08/24/23 12:55:29.744 +STEP: Verifying there is no service in the namespace 08/24/23 12:55:29.772 +[AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:32.707: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Containers +Aug 24 12:55:29.781: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Containers +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Containers +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "containers-7734" for this suite. 07/29/23 16:51:32.714 +STEP: Destroying namespace "namespaces-7975" for this suite. 08/24/23 12:55:29.794 +STEP: Destroying namespace "nsdeletetest-6532" for this suite. 08/24/23 12:55:29.805 +Aug 24 12:55:29.810: INFO: Namespace nsdeletetest-6532 was already deleted +STEP: Destroying namespace "nsdeletetest-1683" for this suite. 
08/24/23 12:55:29.81 ------------------------------ -• [SLOW TEST] [6.245 seconds] -[sig-node] Containers -test/e2e/common/node/framework.go:23 - should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:59 +• [SLOW TEST] [6.212 seconds] +[sig-api-machinery] Namespaces [Serial] +test/e2e/apimachinery/framework.go:23 + should ensure that all services are removed when a namespace is deleted [Conformance] + test/e2e/apimachinery/namespace.go:251 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Containers + [BeforeEach] [sig-api-machinery] Namespaces [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:26.483 - Jul 29 16:51:26.483: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename containers 07/29/23 16:51:26.488 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:26.522 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:26.525 - [BeforeEach] [sig-node] Containers + STEP: Creating a kubernetes client 08/24/23 12:55:23.609 + Aug 24 12:55:23.609: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename namespaces 08/24/23 12:55:23.611 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:23.646 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:23.649 + [BeforeEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] - test/e2e/common/node/containers.go:59 - STEP: Creating a pod to test override arguments 07/29/23 16:51:26.532 - Jul 29 16:51:26.547: INFO: Waiting up to 5m0s for pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920" in namespace "containers-7734" to be "Succeeded or Failed" - Jul 29 16:51:26.554: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920": Phase="Pending", Reason="", readiness=false. Elapsed: 6.403899ms - Jul 29 16:51:28.566: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018041234s - Jul 29 16:51:30.567: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920": Phase="Pending", Reason="", readiness=false. Elapsed: 4.019508663s - Jul 29 16:51:32.566: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.018763066s - STEP: Saw pod success 07/29/23 16:51:32.567 - Jul 29 16:51:32.567: INFO: Pod "client-containers-d741d384-18d4-410a-95d8-237450ee8920" satisfied condition "Succeeded or Failed" - Jul 29 16:51:32.580: INFO: Trying to get logs from node wetuj3nuajog-3 pod client-containers-d741d384-18d4-410a-95d8-237450ee8920 container agnhost-container: - STEP: delete the pod 07/29/23 16:51:32.662 - Jul 29 16:51:32.698: INFO: Waiting for pod client-containers-d741d384-18d4-410a-95d8-237450ee8920 to disappear - Jul 29 16:51:32.707: INFO: Pod client-containers-d741d384-18d4-410a-95d8-237450ee8920 no longer exists - [AfterEach] [sig-node] Containers + [It] should ensure that all services are removed when a namespace is deleted [Conformance] + test/e2e/apimachinery/namespace.go:251 + STEP: Creating a test namespace 08/24/23 12:55:23.655 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:23.687 + STEP: Creating a service in the namespace 08/24/23 12:55:23.691 + STEP: Deleting the namespace 08/24/23 12:55:23.712 + STEP: Waiting for the namespace to be removed. 08/24/23 12:55:23.736 + STEP: Recreating the namespace 08/24/23 12:55:29.744 + STEP: Verifying there is no service in the namespace 08/24/23 12:55:29.772 + [AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:32.707: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Containers + Aug 24 12:55:29.781: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Containers + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Containers + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "containers-7734" for this suite. 07/29/23 16:51:32.714 + STEP: Destroying namespace "namespaces-7975" for this suite. 08/24/23 12:55:29.794 + STEP: Destroying namespace "nsdeletetest-6532" for this suite. 08/24/23 12:55:29.805 + Aug 24 12:55:29.810: INFO: Namespace nsdeletetest-6532 was already deleted + STEP: Destroying namespace "nsdeletetest-1683" for this suite. 
08/24/23 12:55:29.81 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:124 -[BeforeEach] [sig-storage] ConfigMap +[sig-node] Ephemeral Containers [NodeConformance] + will start an ephemeral container in an existing pod [Conformance] + test/e2e/common/node/ephemeral_containers.go:45 +[BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:32.734 -Jul 29 16:51:32.734: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:51:32.738 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:32.766 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:32.773 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 12:55:29.822 +Aug 24 12:55:29.822: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename ephemeral-containers-test 08/24/23 12:55:29.824 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:29.848 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:29.852 +[BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] test/e2e/framework/metrics/init/init.go:31 -[It] updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:124 -STEP: Creating configMap with name configmap-test-upd-5e4b24c0-41b7-4bd8-8682-2c0ab5b2a724 07/29/23 16:51:32.783 -STEP: Creating the pod 07/29/23 16:51:32.79 -Jul 29 16:51:32.803: INFO: Waiting up to 5m0s for pod "pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495" in namespace "configmap-9463" to be "running and ready" -Jul 29 16:51:32.807: INFO: Pod "pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495": Phase="Pending", Reason="", readiness=false. Elapsed: 4.789743ms -Jul 29 16:51:32.807: INFO: The phase of Pod pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:51:34.817: INFO: Pod "pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495": Phase="Running", Reason="", readiness=true. Elapsed: 2.014807336s -Jul 29 16:51:34.818: INFO: The phase of Pod pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495 is Running (Ready = true) -Jul 29 16:51:34.818: INFO: Pod "pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495" satisfied condition "running and ready" -STEP: Updating configmap configmap-test-upd-5e4b24c0-41b7-4bd8-8682-2c0ab5b2a724 07/29/23 16:51:34.839 -STEP: waiting to observe update in volume 07/29/23 16:51:34.856 -[AfterEach] [sig-storage] ConfigMap +[BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] + test/e2e/common/node/ephemeral_containers.go:38 +[It] will start an ephemeral container in an existing pod [Conformance] + test/e2e/common/node/ephemeral_containers.go:45 +STEP: creating a target pod 08/24/23 12:55:29.857 +Aug 24 12:55:29.872: INFO: Waiting up to 5m0s for pod "ephemeral-containers-target-pod" in namespace "ephemeral-containers-test-8428" to be "running and ready" +Aug 24 12:55:29.883: INFO: Pod "ephemeral-containers-target-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 11.051852ms +Aug 24 12:55:29.883: INFO: The phase of Pod ephemeral-containers-target-pod is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:55:31.894: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.022296668s +Aug 24 12:55:31.894: INFO: The phase of Pod ephemeral-containers-target-pod is Running (Ready = true) +Aug 24 12:55:31.895: INFO: Pod "ephemeral-containers-target-pod" satisfied condition "running and ready" +STEP: adding an ephemeral container 08/24/23 12:55:31.902 +Aug 24 12:55:31.937: INFO: Waiting up to 1m0s for pod "ephemeral-containers-target-pod" in namespace "ephemeral-containers-test-8428" to be "container debugger running" +Aug 24 12:55:31.943: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 5.289046ms +Aug 24 12:55:33.951: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.013584441s +Aug 24 12:55:35.949: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 4.011915044s +Aug 24 12:55:35.949: INFO: Pod "ephemeral-containers-target-pod" satisfied condition "container debugger running" +STEP: checking pod container endpoints 08/24/23 12:55:35.95 +Aug 24 12:55:35.950: INFO: ExecWithOptions {Command:[/bin/echo marco] Namespace:ephemeral-containers-test-8428 PodName:ephemeral-containers-target-pod ContainerName:debugger Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 12:55:35.950: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 12:55:35.952: INFO: ExecWithOptions: Clientset creation +Aug 24 12:55:35.952: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/ephemeral-containers-test-8428/pods/ephemeral-containers-target-pod/exec?command=%2Fbin%2Fecho&command=marco&container=debugger&container=debugger&stderr=true&stdout=true) +Aug 24 12:55:36.095: INFO: Exec stderr: "" +[AfterEach] [sig-node] Ephemeral Containers [NodeConformance] test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:36.893: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 12:55:36.125: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-9463" for this suite. 07/29/23 16:51:36.903 +STEP: Destroying namespace "ephemeral-containers-test-8428" for this suite. 
08/24/23 12:55:36.134 ------------------------------ -• [4.183 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:124 +• [SLOW TEST] [6.324 seconds] +[sig-node] Ephemeral Containers [NodeConformance] +test/e2e/common/node/framework.go:23 + will start an ephemeral container in an existing pod [Conformance] + test/e2e/common/node/ephemeral_containers.go:45 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:32.734 - Jul 29 16:51:32.734: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:51:32.738 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:32.766 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:32.773 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 12:55:29.822 + Aug 24 12:55:29.822: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename ephemeral-containers-test 08/24/23 12:55:29.824 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:29.848 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:29.852 + [BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] test/e2e/framework/metrics/init/init.go:31 - [It] updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:124 - STEP: Creating configMap with name configmap-test-upd-5e4b24c0-41b7-4bd8-8682-2c0ab5b2a724 07/29/23 16:51:32.783 - STEP: Creating the pod 07/29/23 16:51:32.79 - Jul 29 16:51:32.803: INFO: Waiting up to 5m0s for pod "pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495" in namespace "configmap-9463" to be "running and ready" - Jul 29 16:51:32.807: INFO: Pod "pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495": Phase="Pending", Reason="", readiness=false. Elapsed: 4.789743ms - Jul 29 16:51:32.807: INFO: The phase of Pod pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:51:34.817: INFO: Pod "pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.014807336s - Jul 29 16:51:34.818: INFO: The phase of Pod pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495 is Running (Ready = true) - Jul 29 16:51:34.818: INFO: Pod "pod-configmaps-a31ae3a7-bb5e-4973-bb82-47d9d8fac495" satisfied condition "running and ready" - STEP: Updating configmap configmap-test-upd-5e4b24c0-41b7-4bd8-8682-2c0ab5b2a724 07/29/23 16:51:34.839 - STEP: waiting to observe update in volume 07/29/23 16:51:34.856 - [AfterEach] [sig-storage] ConfigMap + [BeforeEach] [sig-node] Ephemeral Containers [NodeConformance] + test/e2e/common/node/ephemeral_containers.go:38 + [It] will start an ephemeral container in an existing pod [Conformance] + test/e2e/common/node/ephemeral_containers.go:45 + STEP: creating a target pod 08/24/23 12:55:29.857 + Aug 24 12:55:29.872: INFO: Waiting up to 5m0s for pod "ephemeral-containers-target-pod" in namespace "ephemeral-containers-test-8428" to be "running and ready" + Aug 24 12:55:29.883: INFO: Pod "ephemeral-containers-target-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 11.051852ms + Aug 24 12:55:29.883: INFO: The phase of Pod ephemeral-containers-target-pod is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:55:31.894: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.022296668s + Aug 24 12:55:31.894: INFO: The phase of Pod ephemeral-containers-target-pod is Running (Ready = true) + Aug 24 12:55:31.895: INFO: Pod "ephemeral-containers-target-pod" satisfied condition "running and ready" + STEP: adding an ephemeral container 08/24/23 12:55:31.902 + Aug 24 12:55:31.937: INFO: Waiting up to 1m0s for pod "ephemeral-containers-target-pod" in namespace "ephemeral-containers-test-8428" to be "container debugger running" + Aug 24 12:55:31.943: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 5.289046ms + Aug 24 12:55:33.951: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.013584441s + Aug 24 12:55:35.949: INFO: Pod "ephemeral-containers-target-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.011915044s + Aug 24 12:55:35.949: INFO: Pod "ephemeral-containers-target-pod" satisfied condition "container debugger running" + STEP: checking pod container endpoints 08/24/23 12:55:35.95 + Aug 24 12:55:35.950: INFO: ExecWithOptions {Command:[/bin/echo marco] Namespace:ephemeral-containers-test-8428 PodName:ephemeral-containers-target-pod ContainerName:debugger Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 12:55:35.950: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 12:55:35.952: INFO: ExecWithOptions: Clientset creation + Aug 24 12:55:35.952: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/ephemeral-containers-test-8428/pods/ephemeral-containers-target-pod/exec?command=%2Fbin%2Fecho&command=marco&container=debugger&container=debugger&stderr=true&stdout=true) + Aug 24 12:55:36.095: INFO: Exec stderr: "" + [AfterEach] [sig-node] Ephemeral Containers [NodeConformance] test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:36.893: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 12:55:36.125: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] Ephemeral Containers [NodeConformance] tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-9463" for this suite. 07/29/23 16:51:36.903 + STEP: Destroying namespace "ephemeral-containers-test-8428" for this suite. 
08/24/23 12:55:36.134 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl replace - should update a single-container pod's image [Conformance] - test/e2e/kubectl/kubectl.go:1747 -[BeforeEach] [sig-cli] Kubectl client +[sig-node] Lease + lease API should be available [Conformance] + test/e2e/common/node/lease.go:72 +[BeforeEach] [sig-node] Lease set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:36.918 -Jul 29 16:51:36.918: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:51:36.92 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:36.948 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:36.951 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 12:55:36.15 +Aug 24 12:55:36.150: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename lease-test 08/24/23 12:55:36.152 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:36.18 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:36.185 +[BeforeEach] [sig-node] Lease test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[BeforeEach] Kubectl replace - test/e2e/kubectl/kubectl.go:1734 -[It] should update a single-container pod's image [Conformance] - test/e2e/kubectl/kubectl.go:1747 -STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 07/29/23 16:51:36.956 -Jul 29 16:51:36.957: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-7096 run e2e-test-httpd-pod --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod' -Jul 29 16:51:37.146: INFO: stderr: "" -Jul 29 16:51:37.146: INFO: stdout: "pod/e2e-test-httpd-pod created\n" -STEP: verifying the pod e2e-test-httpd-pod is running 07/29/23 16:51:37.146 -STEP: verifying the pod e2e-test-httpd-pod was created 07/29/23 16:51:42.198 -Jul 29 16:51:42.199: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-7096 get pod e2e-test-httpd-pod -o json' -Jul 29 16:51:42.454: INFO: stderr: "" -Jul 29 16:51:42.455: INFO: stdout: "{\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2023-07-29T16:51:37Z\",\n \"labels\": {\n \"run\": \"e2e-test-httpd-pod\"\n },\n \"name\": \"e2e-test-httpd-pod\",\n \"namespace\": \"kubectl-7096\",\n \"resourceVersion\": \"33578\",\n \"uid\": \"657ebd49-ec63-4460-a7e2-107c39a771db\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"e2e-test-httpd-pod\",\n \"resources\": {},\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-cmxlv\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"ClusterFirst\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"wetuj3nuajog-3\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n 
\"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 30,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-cmxlv\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-07-29T16:51:37Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-07-29T16:51:38Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-07-29T16:51:38Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-07-29T16:51:37Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"cri-o://ceea50299b62bbf696d8749830b6d4f12e373f30c0c5f7de1e7451bd5e4aa3ac\",\n \"image\": \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\",\n \"imageID\": \"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22\",\n \"lastState\": {},\n \"name\": \"e2e-test-httpd-pod\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2023-07-29T16:51:38Z\"\n }\n }\n }\n ],\n \"hostIP\": \"192.168.121.141\",\n \"phase\": \"Running\",\n \"podIP\": \"10.233.66.75\",\n \"podIPs\": [\n {\n \"ip\": \"10.233.66.75\"\n }\n ],\n \"qosClass\": \"BestEffort\",\n \"startTime\": \"2023-07-29T16:51:37Z\"\n }\n}\n" -STEP: replace the image in the pod 07/29/23 16:51:42.455 -Jul 29 16:51:42.455: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-7096 replace -f -' -Jul 29 16:51:44.309: INFO: stderr: "" -Jul 29 16:51:44.310: INFO: stdout: "pod/e2e-test-httpd-pod replaced\n" -STEP: verifying the pod e2e-test-httpd-pod has the right image registry.k8s.io/e2e-test-images/busybox:1.29-4 07/29/23 16:51:44.31 -[AfterEach] Kubectl replace - test/e2e/kubectl/kubectl.go:1738 -Jul 29 16:51:44.322: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-7096 delete pods e2e-test-httpd-pod' -Jul 29 16:51:45.768: INFO: stderr: "" -Jul 29 16:51:45.768: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" -[AfterEach] [sig-cli] Kubectl client +[It] lease API should be available [Conformance] + test/e2e/common/node/lease.go:72 +[AfterEach] [sig-node] Lease test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:45.768: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 12:55:36.296: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup 
(Each)] [sig-node] Lease test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-node] Lease dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-node] Lease tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-7096" for this suite. 07/29/23 16:51:45.779 +STEP: Destroying namespace "lease-test-5599" for this suite. 08/24/23 12:55:36.305 ------------------------------ -• [SLOW TEST] [8.874 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Kubectl replace - test/e2e/kubectl/kubectl.go:1731 - should update a single-container pod's image [Conformance] - test/e2e/kubectl/kubectl.go:1747 +• [0.167 seconds] +[sig-node] Lease +test/e2e/common/node/framework.go:23 + lease API should be available [Conformance] + test/e2e/common/node/lease.go:72 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-node] Lease set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:36.918 - Jul 29 16:51:36.918: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 16:51:36.92 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:36.948 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:36.951 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 12:55:36.15 + Aug 24 12:55:36.150: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename lease-test 08/24/23 12:55:36.152 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:36.18 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:36.185 + [BeforeEach] [sig-node] Lease test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [BeforeEach] Kubectl replace - test/e2e/kubectl/kubectl.go:1734 - [It] should update a single-container pod's image [Conformance] - test/e2e/kubectl/kubectl.go:1747 - STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 07/29/23 16:51:36.956 - Jul 29 16:51:36.957: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-7096 run e2e-test-httpd-pod --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod' - Jul 29 16:51:37.146: INFO: stderr: "" - Jul 29 16:51:37.146: INFO: stdout: "pod/e2e-test-httpd-pod created\n" - STEP: verifying the pod e2e-test-httpd-pod is running 07/29/23 16:51:37.146 - STEP: verifying the pod e2e-test-httpd-pod was created 07/29/23 16:51:42.198 - Jul 29 16:51:42.199: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-7096 get pod e2e-test-httpd-pod -o json' - Jul 29 16:51:42.454: INFO: stderr: "" - Jul 29 16:51:42.455: INFO: stdout: "{\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2023-07-29T16:51:37Z\",\n \"labels\": {\n \"run\": \"e2e-test-httpd-pod\"\n },\n \"name\": \"e2e-test-httpd-pod\",\n \"namespace\": \"kubectl-7096\",\n \"resourceVersion\": \"33578\",\n \"uid\": \"657ebd49-ec63-4460-a7e2-107c39a771db\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\",\n \"imagePullPolicy\": 
\"IfNotPresent\",\n \"name\": \"e2e-test-httpd-pod\",\n \"resources\": {},\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-cmxlv\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"ClusterFirst\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"wetuj3nuajog-3\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 30,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-cmxlv\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-07-29T16:51:37Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-07-29T16:51:38Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-07-29T16:51:38Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2023-07-29T16:51:37Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"cri-o://ceea50299b62bbf696d8749830b6d4f12e373f30c0c5f7de1e7451bd5e4aa3ac\",\n \"image\": \"registry.k8s.io/e2e-test-images/httpd:2.4.38-4\",\n \"imageID\": \"registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22\",\n \"lastState\": {},\n \"name\": \"e2e-test-httpd-pod\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2023-07-29T16:51:38Z\"\n }\n }\n }\n ],\n \"hostIP\": \"192.168.121.141\",\n \"phase\": \"Running\",\n \"podIP\": \"10.233.66.75\",\n \"podIPs\": [\n {\n \"ip\": \"10.233.66.75\"\n }\n ],\n \"qosClass\": \"BestEffort\",\n \"startTime\": \"2023-07-29T16:51:37Z\"\n }\n}\n" - STEP: replace the image in the pod 07/29/23 16:51:42.455 - Jul 29 16:51:42.455: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-7096 replace -f -' - Jul 29 16:51:44.309: INFO: stderr: "" - Jul 29 16:51:44.310: INFO: stdout: "pod/e2e-test-httpd-pod replaced\n" - STEP: verifying the pod e2e-test-httpd-pod has the right image registry.k8s.io/e2e-test-images/busybox:1.29-4 07/29/23 16:51:44.31 - [AfterEach] Kubectl replace - test/e2e/kubectl/kubectl.go:1738 - Jul 29 16:51:44.322: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-7096 delete pods e2e-test-httpd-pod' - Jul 29 16:51:45.768: INFO: stderr: "" - Jul 29 16:51:45.768: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" - [AfterEach] [sig-cli] Kubectl client + [It] lease API should be available [Conformance] + test/e2e/common/node/lease.go:72 + [AfterEach] [sig-node] Lease test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:45.768: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 12:55:36.296: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Lease test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-node] Lease dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-node] Lease tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-7096" for this suite. 07/29/23 16:51:45.779 + STEP: Destroying namespace "lease-test-5599" for this suite. 08/24/23 12:55:36.305 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] ResourceQuota - should manage the lifecycle of a ResourceQuota [Conformance] - test/e2e/apimachinery/resource_quota.go:943 -[BeforeEach] [sig-api-machinery] ResourceQuota +[sig-api-machinery] Garbage collector + should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + test/e2e/apimachinery/garbage_collector.go:650 +[BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:45.794 -Jul 29 16:51:45.794: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename resourcequota 07/29/23 16:51:45.796 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:45.822 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:45.827 -[BeforeEach] [sig-api-machinery] ResourceQuota +STEP: Creating a kubernetes client 08/24/23 12:55:36.323 +Aug 24 12:55:36.323: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename gc 08/24/23 12:55:36.325 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:36.349 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:36.355 +[BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 -[It] should manage the lifecycle of a ResourceQuota [Conformance] - test/e2e/apimachinery/resource_quota.go:943 -STEP: Creating a ResourceQuota 07/29/23 16:51:45.835 -STEP: Getting a ResourceQuota 07/29/23 16:51:45.846 -STEP: Listing all ResourceQuotas with LabelSelector 07/29/23 16:51:45.855 -STEP: Patching the ResourceQuota 07/29/23 16:51:45.86 -STEP: Deleting a Collection of ResourceQuotas 07/29/23 16:51:45.878 -STEP: Verifying the deleted ResourceQuota 07/29/23 16:51:45.894 -[AfterEach] [sig-api-machinery] ResourceQuota +[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + test/e2e/apimachinery/garbage_collector.go:650 +STEP: create the rc 08/24/23 12:55:36.374 +STEP: delete the rc 08/24/23 12:55:41.855 +STEP: wait for the rc to be deleted 08/24/23 12:55:41.948 +Aug 24 12:55:43.534: INFO: 87 pods 
remaining +Aug 24 12:55:43.534: INFO: 79 pods has nil DeletionTimestamp +Aug 24 12:55:43.534: INFO: +Aug 24 12:55:44.997: INFO: 84 pods remaining +Aug 24 12:55:44.997: INFO: 67 pods has nil DeletionTimestamp +Aug 24 12:55:44.998: INFO: +Aug 24 12:55:46.595: INFO: 69 pods remaining +Aug 24 12:55:46.595: INFO: 41 pods has nil DeletionTimestamp +Aug 24 12:55:46.595: INFO: +Aug 24 12:55:47.080: INFO: 60 pods remaining +Aug 24 12:55:47.080: INFO: 31 pods has nil DeletionTimestamp +Aug 24 12:55:47.080: INFO: +Aug 24 12:55:48.591: INFO: 56 pods remaining +Aug 24 12:55:48.591: INFO: 17 pods has nil DeletionTimestamp +Aug 24 12:55:48.591: INFO: +Aug 24 12:55:49.695: INFO: 49 pods remaining +Aug 24 12:55:49.695: INFO: 0 pods has nil DeletionTimestamp +Aug 24 12:55:49.695: INFO: +Aug 24 12:55:50.134: INFO: 43 pods remaining +Aug 24 12:55:50.151: INFO: 0 pods has nil DeletionTimestamp +Aug 24 12:55:50.161: INFO: +Aug 24 12:55:51.020: INFO: 34 pods remaining +Aug 24 12:55:51.214: INFO: 0 pods has nil DeletionTimestamp +Aug 24 12:55:51.214: INFO: +Aug 24 12:55:52.107: INFO: 29 pods remaining +Aug 24 12:55:52.107: INFO: 0 pods has nil DeletionTimestamp +Aug 24 12:55:52.107: INFO: +Aug 24 12:55:53.172: INFO: 21 pods remaining +Aug 24 12:55:53.172: INFO: 0 pods has nil DeletionTimestamp +Aug 24 12:55:53.172: INFO: +Aug 24 12:55:53.979: INFO: 14 pods remaining +Aug 24 12:55:53.979: INFO: 0 pods has nil DeletionTimestamp +Aug 24 12:55:53.979: INFO: +Aug 24 12:55:55.066: INFO: 8 pods remaining +Aug 24 12:55:55.066: INFO: 0 pods has nil DeletionTimestamp +Aug 24 12:55:55.066: INFO: +Aug 24 12:55:56.096: INFO: 1 pods remaining +Aug 24 12:55:56.096: INFO: 0 pods has nil DeletionTimestamp +Aug 24 12:55:56.096: INFO: +STEP: Gathering metrics 08/24/23 12:55:57.044 +Aug 24 12:55:57.226: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" +Aug 24 12:55:57.447: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 220.264385ms +Aug 24 12:55:57.447: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) +Aug 24 12:55:57.447: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" +Aug 24 12:55:57.872: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:45.900: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +Aug 24 12:55:57.874: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota +[DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 -STEP: Destroying namespace "resourcequota-6455" for this suite. 07/29/23 16:51:45.907 +STEP: Destroying namespace "gc-50" for this suite. 
08/24/23 12:55:57.907 ------------------------------ -• [0.126 seconds] -[sig-api-machinery] ResourceQuota +• [SLOW TEST] [21.643 seconds] +[sig-api-machinery] Garbage collector test/e2e/apimachinery/framework.go:23 - should manage the lifecycle of a ResourceQuota [Conformance] - test/e2e/apimachinery/resource_quota.go:943 + should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + test/e2e/apimachinery/garbage_collector.go:650 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] ResourceQuota + [BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:45.794 - Jul 29 16:51:45.794: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename resourcequota 07/29/23 16:51:45.796 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:45.822 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:45.827 - [BeforeEach] [sig-api-machinery] ResourceQuota + STEP: Creating a kubernetes client 08/24/23 12:55:36.323 + Aug 24 12:55:36.323: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename gc 08/24/23 12:55:36.325 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:36.349 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:36.355 + [BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 - [It] should manage the lifecycle of a ResourceQuota [Conformance] - test/e2e/apimachinery/resource_quota.go:943 - STEP: Creating a ResourceQuota 07/29/23 16:51:45.835 - STEP: Getting a ResourceQuota 07/29/23 16:51:45.846 - STEP: Listing all ResourceQuotas with LabelSelector 07/29/23 16:51:45.855 - STEP: Patching the ResourceQuota 07/29/23 16:51:45.86 - STEP: Deleting a Collection of ResourceQuotas 07/29/23 16:51:45.878 - STEP: Verifying the deleted ResourceQuota 07/29/23 16:51:45.894 - [AfterEach] [sig-api-machinery] ResourceQuota + [It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + test/e2e/apimachinery/garbage_collector.go:650 + STEP: create the rc 08/24/23 12:55:36.374 + STEP: delete the rc 08/24/23 12:55:41.855 + STEP: wait for the rc to be deleted 08/24/23 12:55:41.948 + Aug 24 12:55:43.534: INFO: 87 pods remaining + Aug 24 12:55:43.534: INFO: 79 pods has nil DeletionTimestamp + Aug 24 12:55:43.534: INFO: + Aug 24 12:55:44.997: INFO: 84 pods remaining + Aug 24 12:55:44.997: INFO: 67 pods has nil DeletionTimestamp + Aug 24 12:55:44.998: INFO: + Aug 24 12:55:46.595: INFO: 69 pods remaining + Aug 24 12:55:46.595: INFO: 41 pods has nil DeletionTimestamp + Aug 24 12:55:46.595: INFO: + Aug 24 12:55:47.080: INFO: 60 pods remaining + Aug 24 12:55:47.080: INFO: 31 pods has nil DeletionTimestamp + Aug 24 12:55:47.080: INFO: + Aug 24 12:55:48.591: INFO: 56 pods remaining + Aug 24 12:55:48.591: INFO: 17 pods has nil DeletionTimestamp + Aug 24 12:55:48.591: INFO: + Aug 24 12:55:49.695: INFO: 49 pods remaining + Aug 24 12:55:49.695: INFO: 0 pods has nil DeletionTimestamp + Aug 24 12:55:49.695: INFO: + Aug 24 12:55:50.134: INFO: 43 pods remaining + Aug 24 12:55:50.151: INFO: 0 pods has nil DeletionTimestamp + Aug 24 12:55:50.161: INFO: + Aug 24 12:55:51.020: INFO: 34 pods remaining + Aug 24 12:55:51.214: INFO: 0 pods has nil DeletionTimestamp + Aug 24 12:55:51.214: INFO: + 
Aug 24 12:55:52.107: INFO: 29 pods remaining + Aug 24 12:55:52.107: INFO: 0 pods has nil DeletionTimestamp + Aug 24 12:55:52.107: INFO: + Aug 24 12:55:53.172: INFO: 21 pods remaining + Aug 24 12:55:53.172: INFO: 0 pods has nil DeletionTimestamp + Aug 24 12:55:53.172: INFO: + Aug 24 12:55:53.979: INFO: 14 pods remaining + Aug 24 12:55:53.979: INFO: 0 pods has nil DeletionTimestamp + Aug 24 12:55:53.979: INFO: + Aug 24 12:55:55.066: INFO: 8 pods remaining + Aug 24 12:55:55.066: INFO: 0 pods has nil DeletionTimestamp + Aug 24 12:55:55.066: INFO: + Aug 24 12:55:56.096: INFO: 1 pods remaining + Aug 24 12:55:56.096: INFO: 0 pods has nil DeletionTimestamp + Aug 24 12:55:56.096: INFO: + STEP: Gathering metrics 08/24/23 12:55:57.044 + Aug 24 12:55:57.226: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" + Aug 24 12:55:57.447: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. Elapsed: 220.264385ms + Aug 24 12:55:57.447: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) + Aug 24 12:55:57.447: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" + Aug 24 12:55:57.872: INFO: For apiserver_request_total: + For apiserver_request_latency_seconds: + For apiserver_init_events_total: + For garbage_collector_attempt_to_delete_queue_latency: + For garbage_collector_attempt_to_delete_work_duration: + For garbage_collector_attempt_to_orphan_queue_latency: + For garbage_collector_attempt_to_orphan_work_duration: + For garbage_collector_dirty_processing_latency_microseconds: + For garbage_collector_event_processing_latency_microseconds: + For garbage_collector_graph_changes_queue_latency: + For garbage_collector_graph_changes_work_duration: + For garbage_collector_orphan_processing_latency_microseconds: + For namespace_queue_latency: + For namespace_queue_latency_sum: + For namespace_queue_latency_count: + For namespace_retries: + For namespace_work_duration: + For namespace_work_duration_sum: + For namespace_work_duration_count: + For function_duration_seconds: + For errors_total: + For evicted_pods_total: + + [AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:45.900: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + Aug 24 12:55:57.874: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota + [DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 - STEP: Destroying namespace "resourcequota-6455" for this suite. 07/29/23 16:51:45.907 + STEP: Destroying namespace "gc-50" for this suite. 
08/24/23 12:55:57.907 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-node] Container Runtime blackbox test on terminated container - should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:232 -[BeforeEach] [sig-node] Container Runtime +[sig-apps] Job + should create pods for an Indexed job with completion indexes and specified hostname [Conformance] + test/e2e/apps/job.go:366 +[BeforeEach] [sig-apps] Job set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:45.923 -Jul 29 16:51:45.924: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-runtime 07/29/23 16:51:45.926 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:45.951 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:45.959 -[BeforeEach] [sig-node] Container Runtime +STEP: Creating a kubernetes client 08/24/23 12:55:57.971 +Aug 24 12:55:57.971: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename job 08/24/23 12:55:57.988 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:58.113 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:58.165 +[BeforeEach] [sig-apps] Job test/e2e/framework/metrics/init/init.go:31 -[It] should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:232 -STEP: create the container 07/29/23 16:51:45.967 -STEP: wait for the container to reach Succeeded 07/29/23 16:51:45.981 -STEP: get the container status 07/29/23 16:51:50.024 -STEP: the container should be terminated 07/29/23 16:51:50.03 -STEP: the termination message should be set 07/29/23 16:51:50.03 -Jul 29 16:51:50.031: INFO: Expected: &{} to match Container's Termination Message: -- -STEP: delete the container 07/29/23 16:51:50.031 -[AfterEach] [sig-node] Container Runtime +[It] should create pods for an Indexed job with completion indexes and specified hostname [Conformance] + test/e2e/apps/job.go:366 +STEP: Creating Indexed job 08/24/23 12:55:58.221 +STEP: Ensuring job reaches completions 08/24/23 12:55:58.478 +STEP: Ensuring pods with index for job exist 08/24/23 12:56:14.548 +[AfterEach] [sig-apps] Job test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:50.057: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Container Runtime +Aug 24 12:56:14.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Job test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Container Runtime +[DeferCleanup (Each)] [sig-apps] Job dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Container Runtime +[DeferCleanup (Each)] [sig-apps] Job tear down framework | framework.go:193 -STEP: Destroying namespace "container-runtime-9223" for this suite. 07/29/23 16:51:50.068 +STEP: Destroying namespace "job-4113" for this suite. 
08/24/23 12:56:14.578 ------------------------------ -• [4.167 seconds] -[sig-node] Container Runtime -test/e2e/common/node/framework.go:23 - blackbox test - test/e2e/common/node/runtime.go:44 - on terminated container - test/e2e/common/node/runtime.go:137 - should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:232 +• [SLOW TEST] [16.632 seconds] +[sig-apps] Job +test/e2e/apps/framework.go:23 + should create pods for an Indexed job with completion indexes and specified hostname [Conformance] + test/e2e/apps/job.go:366 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Container Runtime + [BeforeEach] [sig-apps] Job set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:45.923 - Jul 29 16:51:45.924: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-runtime 07/29/23 16:51:45.926 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:45.951 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:45.959 - [BeforeEach] [sig-node] Container Runtime + STEP: Creating a kubernetes client 08/24/23 12:55:57.971 + Aug 24 12:55:57.971: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename job 08/24/23 12:55:57.988 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:55:58.113 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:55:58.165 + [BeforeEach] [sig-apps] Job test/e2e/framework/metrics/init/init.go:31 - [It] should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:232 - STEP: create the container 07/29/23 16:51:45.967 - STEP: wait for the container to reach Succeeded 07/29/23 16:51:45.981 - STEP: get the container status 07/29/23 16:51:50.024 - STEP: the container should be terminated 07/29/23 16:51:50.03 - STEP: the termination message should be set 07/29/23 16:51:50.03 - Jul 29 16:51:50.031: INFO: Expected: &{} to match Container's Termination Message: -- - STEP: delete the container 07/29/23 16:51:50.031 - [AfterEach] [sig-node] Container Runtime + [It] should create pods for an Indexed job with completion indexes and specified hostname [Conformance] + test/e2e/apps/job.go:366 + STEP: Creating Indexed job 08/24/23 12:55:58.221 + STEP: Ensuring job reaches completions 08/24/23 12:55:58.478 + STEP: Ensuring pods with index for job exist 08/24/23 12:56:14.548 + [AfterEach] [sig-apps] Job test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:50.057: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Container Runtime + Aug 24 12:56:14.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Job test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Container Runtime + [DeferCleanup (Each)] [sig-apps] Job dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Container Runtime + [DeferCleanup (Each)] [sig-apps] Job tear down framework | framework.go:193 - STEP: Destroying namespace "container-runtime-9223" for this suite. 07/29/23 16:51:50.068 + STEP: Destroying namespace "job-4113" for this suite. 
08/24/23 12:56:14.578 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Kubectl version - should check is all data is printed [Conformance] - test/e2e/kubectl/kubectl.go:1685 -[BeforeEach] [sig-cli] Kubectl client +[sig-node] Downward API + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:44 +[BeforeEach] [sig-node] Downward API set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:50.094 -Jul 29 16:51:50.094: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:51:50.096 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:50.157 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:50.17 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 12:56:14.618 +Aug 24 12:56:14.618: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 12:56:14.62 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:14.686 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:14.694 +[BeforeEach] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[It] should check is all data is printed [Conformance] - test/e2e/kubectl/kubectl.go:1685 -Jul 29 16:51:50.176: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-8334 version' -Jul 29 16:51:50.279: INFO: stderr: "WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.\n" -Jul 29 16:51:50.280: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"26\", GitVersion:\"v1.26.7\", GitCommit:\"84e1fc493a47446df2e155e70fca768d2653a398\", GitTreeState:\"clean\", BuildDate:\"2023-07-19T12:23:27Z\", GoVersion:\"go1.20.6\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nKustomize Version: v4.5.7\nServer Version: version.Info{Major:\"1\", Minor:\"26\", GitVersion:\"v1.26.7\", GitCommit:\"84e1fc493a47446df2e155e70fca768d2653a398\", GitTreeState:\"clean\", BuildDate:\"2023-07-19T12:16:45Z\", GoVersion:\"go1.20.6\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" -[AfterEach] [sig-cli] Kubectl client +[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:44 +STEP: Creating a pod to test downward api env vars 08/24/23 12:56:14.703 +Aug 24 12:56:14.718: INFO: Waiting up to 5m0s for pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e" in namespace "downward-api-4585" to be "Succeeded or Failed" +Aug 24 12:56:14.749: INFO: Pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e": Phase="Pending", Reason="", readiness=false. Elapsed: 30.904668ms +Aug 24 12:56:16.758: INFO: Pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.03966067s +Aug 24 12:56:18.758: INFO: Pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.039302187s +STEP: Saw pod success 08/24/23 12:56:18.758 +Aug 24 12:56:18.759: INFO: Pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e" satisfied condition "Succeeded or Failed" +Aug 24 12:56:18.769: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e container dapi-container: +STEP: delete the pod 08/24/23 12:56:18.792 +Aug 24 12:56:18.818: INFO: Waiting for pod downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e to disappear +Aug 24 12:56:18.823: INFO: Pod downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e no longer exists +[AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:50.280: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 12:56:18.823: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-8334" for this suite. 07/29/23 16:51:50.291 +STEP: Destroying namespace "downward-api-4585" for this suite. 08/24/23 12:56:18.833 ------------------------------ -• [0.207 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Kubectl version - test/e2e/kubectl/kubectl.go:1679 - should check is all data is printed [Conformance] - test/e2e/kubectl/kubectl.go:1685 +• [4.233 seconds] +[sig-node] Downward API +test/e2e/common/node/framework.go:23 + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:44 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-node] Downward API set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:50.094 - Jul 29 16:51:50.094: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 16:51:50.096 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:50.157 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:50.17 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 12:56:14.618 + Aug 24 12:56:14.618: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 12:56:14.62 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:14.686 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:14.694 + [BeforeEach] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [It] should check is all data is printed [Conformance] - test/e2e/kubectl/kubectl.go:1685 - Jul 29 16:51:50.176: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-8334 version' - Jul 29 16:51:50.279: INFO: stderr: "WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. 
Use --output=yaml|json to get the full version.\n" - Jul 29 16:51:50.280: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"26\", GitVersion:\"v1.26.7\", GitCommit:\"84e1fc493a47446df2e155e70fca768d2653a398\", GitTreeState:\"clean\", BuildDate:\"2023-07-19T12:23:27Z\", GoVersion:\"go1.20.6\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nKustomize Version: v4.5.7\nServer Version: version.Info{Major:\"1\", Minor:\"26\", GitVersion:\"v1.26.7\", GitCommit:\"84e1fc493a47446df2e155e70fca768d2653a398\", GitTreeState:\"clean\", BuildDate:\"2023-07-19T12:16:45Z\", GoVersion:\"go1.20.6\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" - [AfterEach] [sig-cli] Kubectl client + [It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:44 + STEP: Creating a pod to test downward api env vars 08/24/23 12:56:14.703 + Aug 24 12:56:14.718: INFO: Waiting up to 5m0s for pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e" in namespace "downward-api-4585" to be "Succeeded or Failed" + Aug 24 12:56:14.749: INFO: Pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e": Phase="Pending", Reason="", readiness=false. Elapsed: 30.904668ms + Aug 24 12:56:16.758: INFO: Pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.03966067s + Aug 24 12:56:18.758: INFO: Pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.039302187s + STEP: Saw pod success 08/24/23 12:56:18.758 + Aug 24 12:56:18.759: INFO: Pod "downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e" satisfied condition "Succeeded or Failed" + Aug 24 12:56:18.769: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e container dapi-container: + STEP: delete the pod 08/24/23 12:56:18.792 + Aug 24 12:56:18.818: INFO: Waiting for pod downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e to disappear + Aug 24 12:56:18.823: INFO: Pod downward-api-b92be33a-a1cf-4e21-b930-29ad84b7917e no longer exists + [AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:50.280: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 12:56:18.823: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-8334" for this suite. 07/29/23 16:51:50.291 + STEP: Destroying namespace "downward-api-4585" for this suite. 
08/24/23 12:56:18.833 << End Captured GinkgoWriter Output ------------------------------ -[sig-auth] ServiceAccounts - should run through the lifecycle of a ServiceAccount [Conformance] - test/e2e/auth/service_accounts.go:649 -[BeforeEach] [sig-auth] ServiceAccounts +SSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD with validation schema [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:69 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:50.301 -Jul 29 16:51:50.301: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename svcaccounts 07/29/23 16:51:50.303 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:50.332 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:50.338 -[BeforeEach] [sig-auth] ServiceAccounts +STEP: Creating a kubernetes client 08/24/23 12:56:18.855 +Aug 24 12:56:18.855: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:56:18.856 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:18.889 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:18.895 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should run through the lifecycle of a ServiceAccount [Conformance] - test/e2e/auth/service_accounts.go:649 -STEP: creating a ServiceAccount 07/29/23 16:51:50.343 -STEP: watching for the ServiceAccount to be added 07/29/23 16:51:50.369 -STEP: patching the ServiceAccount 07/29/23 16:51:50.371 -STEP: finding ServiceAccount in list of all ServiceAccounts (by LabelSelector) 07/29/23 16:51:50.381 -STEP: deleting the ServiceAccount 07/29/23 16:51:50.387 -[AfterEach] [sig-auth] ServiceAccounts +[It] works for CRD with validation schema [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:69 +Aug 24 12:56:18.906: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: kubectl validation (kubectl create and apply) allows request with known and required properties 08/24/23 12:56:22.833 +Aug 24 12:56:22.835: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 create -f -' +Aug 24 12:56:25.103: INFO: stderr: "" +Aug 24 12:56:25.103: INFO: stdout: "e2e-test-crd-publish-openapi-4766-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" +Aug 24 12:56:25.104: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 delete e2e-test-crd-publish-openapi-4766-crds test-foo' +Aug 24 12:56:25.316: INFO: stderr: "" +Aug 24 12:56:25.317: INFO: stdout: "e2e-test-crd-publish-openapi-4766-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" +Aug 24 12:56:25.317: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 apply -f -' +Aug 24 12:56:26.688: INFO: stderr: "" +Aug 24 12:56:26.688: INFO: stdout: "e2e-test-crd-publish-openapi-4766-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" +Aug 24 12:56:26.688: 
INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 delete e2e-test-crd-publish-openapi-4766-crds test-foo' +Aug 24 12:56:26.887: INFO: stderr: "" +Aug 24 12:56:26.887: INFO: stdout: "e2e-test-crd-publish-openapi-4766-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" +STEP: kubectl validation (kubectl create and apply) rejects request with value outside defined enum values 08/24/23 12:56:26.887 +Aug 24 12:56:26.887: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 create -f -' +Aug 24 12:56:27.374: INFO: rc: 1 +STEP: kubectl validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema 08/24/23 12:56:27.374 +Aug 24 12:56:27.374: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 create -f -' +Aug 24 12:56:27.924: INFO: rc: 1 +Aug 24 12:56:27.924: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 apply -f -' +Aug 24 12:56:28.390: INFO: rc: 1 +STEP: kubectl validation (kubectl create and apply) rejects request without required properties 08/24/23 12:56:28.39 +Aug 24 12:56:28.391: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 create -f -' +Aug 24 12:56:28.801: INFO: rc: 1 +Aug 24 12:56:28.802: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 apply -f -' +Aug 24 12:56:29.250: INFO: rc: 1 +STEP: kubectl explain works to explain CR properties 08/24/23 12:56:29.25 +Aug 24 12:56:29.251: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds' +Aug 24 12:56:29.726: INFO: stderr: "" +Aug 24 12:56:29.726: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4766-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nDESCRIPTION:\n Foo CRD for Testing\n\nFIELDS:\n apiVersion\t<string>\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t<string>\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t<Object>\n Standard object's metadata.
More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<Object>\n Specification of Foo\n\n status\t<Object>\n Status of Foo\n\n" +STEP: kubectl explain works to explain CR properties recursively 08/24/23 12:56:29.727 +Aug 24 12:56:29.727: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds.metadata' +Aug 24 12:56:30.156: INFO: stderr: "" +Aug 24 12:56:30.156: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4766-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: metadata <Object>\n\nDESCRIPTION:\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n ObjectMeta is metadata that all persisted resources must have, which\n includes all objects users must create.\n\nFIELDS:\n annotations\t<map[string]string>\n Annotations is an unstructured key value map stored with a resource that\n may be set by external tools to store and retrieve arbitrary metadata. They\n are not queryable and should be preserved when modifying objects. More\n info: http://kubernetes.io/docs/user-guide/annotations\n\n creationTimestamp\t<string>\n CreationTimestamp is a timestamp representing the server time when this\n object was created. It is not guaranteed to be set in happens-before order\n across separate operations. Clients may not set this value. It is\n represented in RFC3339 form and is in UTC.\n\n Populated by the system. Read-only. Null for lists. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n deletionGracePeriodSeconds\t<integer>\n Number of seconds allowed for this object to gracefully terminate before it\n will be removed from the system. Only set when deletionTimestamp is also\n set. May only be shortened. Read-only.\n\n deletionTimestamp\t<string>\n DeletionTimestamp is RFC 3339 date and time at which this resource will be\n deleted. This field is set by the server when a graceful deletion is\n requested by the user, and is not directly settable by a client. The\n resource is expected to be deleted (no longer visible from resource lists,\n and not reachable by name) after the time in this field, once the\n finalizers list is empty. As long as the finalizers list contains items,\n deletion is blocked. Once the deletionTimestamp is set, this value may not\n be unset or be set further into the future, although it may be shortened or\n the resource may be deleted prior to this time. For example, a user may\n request that a pod is deleted in 30 seconds. The Kubelet will react by\n sending a graceful termination signal to the containers in the pod. After\n that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL)\n to the container and after cleanup, remove the pod from the API. In the\n presence of network partitions, this object may still exist after this\n timestamp, until an administrator or automated process can determine the\n resource is fully terminated. If not set, graceful deletion of the object\n has not been requested.\n\n Populated by the system when a graceful deletion is requested. Read-only.\n More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n finalizers\t<[]string>\n Must be empty before the object is deleted from the registry. Each entry is\n an identifier for the responsible component that will remove the entry from\n the list.
If the deletionTimestamp of the object is non-nil, entries in\n this list can only be removed. Finalizers may be processed and removed in\n any order. Order is NOT enforced because it introduces significant risk of\n stuck finalizers. finalizers is a shared field, any actor with permission\n can reorder it. If the finalizer list is processed in order, then this can\n lead to a situation in which the component responsible for the first\n finalizer in the list is waiting for a signal (field value, external\n system, or other) produced by a component responsible for a finalizer later\n in the list, resulting in a deadlock. Without enforced ordering finalizers\n are free to order amongst themselves and are not vulnerable to ordering\n changes in the list.\n\n generateName\t<string>\n GenerateName is an optional prefix, used by the server, to generate a\n unique name ONLY IF the Name field has not been provided. If this field is\n used, the name returned to the client will be different than the name\n passed. This value will also be combined with a unique suffix. The provided\n value has the same validation rules as the Name field, and may be truncated\n by the length of the suffix required to make the value unique on the\n server.\n\n If this field is specified and the generated name exists, the server will\n return a 409.\n\n Applied only if Name is not specified. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n generation\t<integer>\n A sequence number representing a specific generation of the desired state.\n Populated by the system. Read-only.\n\n labels\t<map[string]string>\n Map of string keys and values that can be used to organize and categorize\n (scope and select) objects. May match selectors of replication controllers\n and services. More info: http://kubernetes.io/docs/user-guide/labels\n\n managedFields\t<[]Object>\n ManagedFields maps workflow-id and version to the set of fields that are\n managed by that workflow. This is mostly for internal housekeeping, and\n users typically shouldn't need to set or understand this field. A workflow\n can be the user's name, a controller's name, or the name of a specific\n apply path like \"ci-cd\". The set of fields is always in the version that\n the workflow used when modifying the object.\n\n name\t<string>\n Name must be unique within a namespace. Is required when creating\n resources, although some resources may allow a client to request the\n generation of an appropriate name automatically. Name is primarily intended\n for creation idempotence and configuration definition. Cannot be updated.\n More info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n namespace\t<string>\n Namespace defines the space within which each name must be unique. An empty\n namespace is equivalent to the \"default\" namespace, but \"default\" is the\n canonical representation. Not all objects are required to be scoped to a\n namespace - the value of this field for those objects will be empty.\n\n Must be a DNS_LABEL. Cannot be updated. More info:\n http://kubernetes.io/docs/user-guide/namespaces\n\n ownerReferences\t<[]Object>\n List of objects depended by this object. If ALL objects in the list have\n been deleted, this object will be garbage collected. If this object is\n managed by a controller, then an entry in this list will point to this\n controller, with the controller field set to true.
There cannot be more\n than one managing controller.\n\n resourceVersion\t<string>\n An opaque value that represents the internal version of this object that\n can be used by clients to determine when objects have changed. May be used\n for optimistic concurrency, change detection, and the watch operation on a\n resource or set of resources. Clients must treat these values as opaque and\n passed unmodified back to the server. They may only be valid for a\n particular resource or set of resources.\n\n Populated by the system. Read-only. Value must be treated as opaque by\n clients and . More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n\n selfLink\t<string>\n Deprecated: selfLink is a legacy read-only field that is no longer\n populated by the system.\n\n uid\t<string>\n UID is the unique in time and space value for this object. It is typically\n generated by the server on successful creation of a resource and is not\n allowed to change on PUT operations.\n\n Populated by the system. Read-only. More info:\n http://kubernetes.io/docs/user-guide/identifiers#uids\n\n" +Aug 24 12:56:30.158: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds.spec' +Aug 24 12:56:30.685: INFO: stderr: "" +Aug 24 12:56:30.685: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4766-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: spec <Object>\n\nDESCRIPTION:\n Specification of Foo\n\nFIELDS:\n bars\t<[]Object>\n List of Bars and their specs.\n\n" +Aug 24 12:56:30.686: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds.spec.bars' +Aug 24 12:56:31.179: INFO: stderr: "" +Aug 24 12:56:31.179: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4766-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: bars <[]Object>\n\nDESCRIPTION:\n List of Bars and their specs.\n\nFIELDS:\n age\t<string>\n Age of Bar.\n\n bazs\t<[]string>\n List of Bazs.\n\n feeling\t<string>\n Whether Bar is feeling great.\n\n name\t<string> -required-\n Name of Bar.\n\n" +STEP: kubectl explain works to return error when explain is called on property that doesn't exist 08/24/23 12:56:31.18 +Aug 24 12:56:31.180: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds.spec.bars2' +Aug 24 12:56:31.758: INFO: rc: 1 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:50.408: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +Aug 24 12:56:34.147: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "svcaccounts-4071" for this suite. 
07/29/23 16:51:50.422 +STEP: Destroying namespace "crd-publish-openapi-8975" for this suite. 08/24/23 12:56:34.177 ------------------------------ -• [0.139 seconds] -[sig-auth] ServiceAccounts -test/e2e/auth/framework.go:23 - should run through the lifecycle of a ServiceAccount [Conformance] - test/e2e/auth/service_accounts.go:649 +• [SLOW TEST] [15.344 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + works for CRD with validation schema [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:69 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-auth] ServiceAccounts + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:50.301 - Jul 29 16:51:50.301: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename svcaccounts 07/29/23 16:51:50.303 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:50.332 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:50.338 - [BeforeEach] [sig-auth] ServiceAccounts + STEP: Creating a kubernetes client 08/24/23 12:56:18.855 + Aug 24 12:56:18.855: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 12:56:18.856 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:18.889 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:18.895 + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should run through the lifecycle of a ServiceAccount [Conformance] - test/e2e/auth/service_accounts.go:649 - STEP: creating a ServiceAccount 07/29/23 16:51:50.343 - STEP: watching for the ServiceAccount to be added 07/29/23 16:51:50.369 - STEP: patching the ServiceAccount 07/29/23 16:51:50.371 - STEP: finding ServiceAccount in list of all ServiceAccounts (by LabelSelector) 07/29/23 16:51:50.381 - STEP: deleting the ServiceAccount 07/29/23 16:51:50.387 - [AfterEach] [sig-auth] ServiceAccounts + [It] works for CRD with validation schema [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:69 + Aug 24 12:56:18.906: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: kubectl validation (kubectl create and apply) allows request with known and required properties 08/24/23 12:56:22.833 + Aug 24 12:56:22.835: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 create -f -' + Aug 24 12:56:25.103: INFO: stderr: "" + Aug 24 12:56:25.103: INFO: stdout: "e2e-test-crd-publish-openapi-4766-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" + Aug 24 12:56:25.104: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 delete e2e-test-crd-publish-openapi-4766-crds test-foo' + Aug 24 12:56:25.316: INFO: stderr: "" + Aug 24 12:56:25.317: INFO: stdout: "e2e-test-crd-publish-openapi-4766-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" + Aug 24 12:56:25.317: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 apply -f -' 
+ Aug 24 12:56:26.688: INFO: stderr: "" + Aug 24 12:56:26.688: INFO: stdout: "e2e-test-crd-publish-openapi-4766-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" + Aug 24 12:56:26.688: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 delete e2e-test-crd-publish-openapi-4766-crds test-foo' + Aug 24 12:56:26.887: INFO: stderr: "" + Aug 24 12:56:26.887: INFO: stdout: "e2e-test-crd-publish-openapi-4766-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" + STEP: kubectl validation (kubectl create and apply) rejects request with value outside defined enum values 08/24/23 12:56:26.887 + Aug 24 12:56:26.887: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 create -f -' + Aug 24 12:56:27.374: INFO: rc: 1 + STEP: kubectl validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema 08/24/23 12:56:27.374 + Aug 24 12:56:27.374: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 create -f -' + Aug 24 12:56:27.924: INFO: rc: 1 + Aug 24 12:56:27.924: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 apply -f -' + Aug 24 12:56:28.390: INFO: rc: 1 + STEP: kubectl validation (kubectl create and apply) rejects request without required properties 08/24/23 12:56:28.39 + Aug 24 12:56:28.391: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 create -f -' + Aug 24 12:56:28.801: INFO: rc: 1 + Aug 24 12:56:28.802: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 --namespace=crd-publish-openapi-8975 apply -f -' + Aug 24 12:56:29.250: INFO: rc: 1 + STEP: kubectl explain works to explain CR properties 08/24/23 12:56:29.25 + Aug 24 12:56:29.251: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds' + Aug 24 12:56:29.726: INFO: stderr: "" + Aug 24 12:56:29.726: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4766-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nDESCRIPTION:\n Foo CRD for Testing\n\nFIELDS:\n apiVersion\t<string>\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t<string>\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t<Object>\n Standard object's metadata.
More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<Object>\n Specification of Foo\n\n status\t<Object>\n Status of Foo\n\n" + STEP: kubectl explain works to explain CR properties recursively 08/24/23 12:56:29.727 + Aug 24 12:56:29.727: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds.metadata' + Aug 24 12:56:30.156: INFO: stderr: "" + Aug 24 12:56:30.156: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4766-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: metadata <Object>\n\nDESCRIPTION:\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n ObjectMeta is metadata that all persisted resources must have, which\n includes all objects users must create.\n\nFIELDS:\n annotations\t<map[string]string>\n Annotations is an unstructured key value map stored with a resource that\n may be set by external tools to store and retrieve arbitrary metadata. They\n are not queryable and should be preserved when modifying objects. More\n info: http://kubernetes.io/docs/user-guide/annotations\n\n creationTimestamp\t<string>\n CreationTimestamp is a timestamp representing the server time when this\n object was created. It is not guaranteed to be set in happens-before order\n across separate operations. Clients may not set this value. It is\n represented in RFC3339 form and is in UTC.\n\n Populated by the system. Read-only. Null for lists. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n deletionGracePeriodSeconds\t<integer>\n Number of seconds allowed for this object to gracefully terminate before it\n will be removed from the system. Only set when deletionTimestamp is also\n set. May only be shortened. Read-only.\n\n deletionTimestamp\t<string>\n DeletionTimestamp is RFC 3339 date and time at which this resource will be\n deleted. This field is set by the server when a graceful deletion is\n requested by the user, and is not directly settable by a client. The\n resource is expected to be deleted (no longer visible from resource lists,\n and not reachable by name) after the time in this field, once the\n finalizers list is empty. As long as the finalizers list contains items,\n deletion is blocked. Once the deletionTimestamp is set, this value may not\n be unset or be set further into the future, although it may be shortened or\n the resource may be deleted prior to this time. For example, a user may\n request that a pod is deleted in 30 seconds. The Kubelet will react by\n sending a graceful termination signal to the containers in the pod. After\n that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL)\n to the container and after cleanup, remove the pod from the API. In the\n presence of network partitions, this object may still exist after this\n timestamp, until an administrator or automated process can determine the\n resource is fully terminated. If not set, graceful deletion of the object\n has not been requested.\n\n Populated by the system when a graceful deletion is requested. Read-only.\n More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n finalizers\t<[]string>\n Must be empty before the object is deleted from the registry. Each entry is\n an identifier for the responsible component that will remove the entry from\n the list.
If the deletionTimestamp of the object is non-nil, entries in\n this list can only be removed. Finalizers may be processed and removed in\n any order. Order is NOT enforced because it introduces significant risk of\n stuck finalizers. finalizers is a shared field, any actor with permission\n can reorder it. If the finalizer list is processed in order, then this can\n lead to a situation in which the component responsible for the first\n finalizer in the list is waiting for a signal (field value, external\n system, or other) produced by a component responsible for a finalizer later\n in the list, resulting in a deadlock. Without enforced ordering finalizers\n are free to order amongst themselves and are not vulnerable to ordering\n changes in the list.\n\n generateName\t<string>\n GenerateName is an optional prefix, used by the server, to generate a\n unique name ONLY IF the Name field has not been provided. If this field is\n used, the name returned to the client will be different than the name\n passed. This value will also be combined with a unique suffix. The provided\n value has the same validation rules as the Name field, and may be truncated\n by the length of the suffix required to make the value unique on the\n server.\n\n If this field is specified and the generated name exists, the server will\n return a 409.\n\n Applied only if Name is not specified. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n generation\t<integer>\n A sequence number representing a specific generation of the desired state.\n Populated by the system. Read-only.\n\n labels\t<map[string]string>\n Map of string keys and values that can be used to organize and categorize\n (scope and select) objects. May match selectors of replication controllers\n and services. More info: http://kubernetes.io/docs/user-guide/labels\n\n managedFields\t<[]Object>\n ManagedFields maps workflow-id and version to the set of fields that are\n managed by that workflow. This is mostly for internal housekeeping, and\n users typically shouldn't need to set or understand this field. A workflow\n can be the user's name, a controller's name, or the name of a specific\n apply path like \"ci-cd\". The set of fields is always in the version that\n the workflow used when modifying the object.\n\n name\t<string>\n Name must be unique within a namespace. Is required when creating\n resources, although some resources may allow a client to request the\n generation of an appropriate name automatically. Name is primarily intended\n for creation idempotence and configuration definition. Cannot be updated.\n More info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n namespace\t<string>\n Namespace defines the space within which each name must be unique. An empty\n namespace is equivalent to the \"default\" namespace, but \"default\" is the\n canonical representation. Not all objects are required to be scoped to a\n namespace - the value of this field for those objects will be empty.\n\n Must be a DNS_LABEL. Cannot be updated. More info:\n http://kubernetes.io/docs/user-guide/namespaces\n\n ownerReferences\t<[]Object>\n List of objects depended by this object. If ALL objects in the list have\n been deleted, this object will be garbage collected. If this object is\n managed by a controller, then an entry in this list will point to this\n controller, with the controller field set to true.
There cannot be more\n than one managing controller.\n\n resourceVersion\t<string>\n An opaque value that represents the internal version of this object that\n can be used by clients to determine when objects have changed. May be used\n for optimistic concurrency, change detection, and the watch operation on a\n resource or set of resources. Clients must treat these values as opaque and\n passed unmodified back to the server. They may only be valid for a\n particular resource or set of resources.\n\n Populated by the system. Read-only. Value must be treated as opaque by\n clients and . More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n\n selfLink\t<string>\n Deprecated: selfLink is a legacy read-only field that is no longer\n populated by the system.\n\n uid\t<string>\n UID is the unique in time and space value for this object. It is typically\n generated by the server on successful creation of a resource and is not\n allowed to change on PUT operations.\n\n Populated by the system. Read-only. More info:\n http://kubernetes.io/docs/user-guide/identifiers#uids\n\n" + Aug 24 12:56:30.158: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds.spec' + Aug 24 12:56:30.685: INFO: stderr: "" + Aug 24 12:56:30.685: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4766-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: spec <Object>\n\nDESCRIPTION:\n Specification of Foo\n\nFIELDS:\n bars\t<[]Object>\n List of Bars and their specs.\n\n" + Aug 24 12:56:30.686: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds.spec.bars' + Aug 24 12:56:31.179: INFO: stderr: "" + Aug 24 12:56:31.179: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-4766-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: bars <[]Object>\n\nDESCRIPTION:\n List of Bars and their specs.\n\nFIELDS:\n age\t<string>\n Age of Bar.\n\n bazs\t<[]string>\n List of Bazs.\n\n feeling\t<string>\n Whether Bar is feeling great.\n\n name\t<string> -required-\n Name of Bar.\n\n" + STEP: kubectl explain works to return error when explain is called on property that doesn't exist 08/24/23 12:56:31.18 + Aug 24 12:56:31.180: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=crd-publish-openapi-8975 explain e2e-test-crd-publish-openapi-4766-crds.spec.bars2' + Aug 24 12:56:31.758: INFO: rc: 1 + [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:50.408: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + Aug 24 12:56:34.147: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "svcaccounts-4071" for this suite. 
07/29/23 16:51:50.422 + STEP: Destroying namespace "crd-publish-openapi-8975" for this suite. 08/24/23 12:56:34.177 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS +SSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:97 -[BeforeEach] [sig-storage] EmptyDir volumes +[sig-network] Services + should serve multiport endpoints from pods [Conformance] + test/e2e/network/service.go:848 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:50.463 -Jul 29 16:51:50.464: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 16:51:50.467 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:50.495 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:50.498 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 12:56:34.212 +Aug 24 12:56:34.212: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 12:56:34.219 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:34.257 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:34.264 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:97 -STEP: Creating a pod to test emptydir 0644 on tmpfs 07/29/23 16:51:50.502 -Jul 29 16:51:50.518: INFO: Waiting up to 5m0s for pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f" in namespace "emptydir-3906" to be "Succeeded or Failed" -Jul 29 16:51:50.534: INFO: Pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f": Phase="Pending", Reason="", readiness=false. Elapsed: 15.779128ms -Jul 29 16:51:52.540: INFO: Pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021232515s -Jul 29 16:51:54.541: INFO: Pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.022708638s -STEP: Saw pod success 07/29/23 16:51:54.541 -Jul 29 16:51:54.542: INFO: Pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f" satisfied condition "Succeeded or Failed" -Jul 29 16:51:54.547: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f container test-container: -STEP: delete the pod 07/29/23 16:51:54.559 -Jul 29 16:51:54.584: INFO: Waiting for pod pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f to disappear -Jul 29 16:51:54.590: INFO: Pod pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should serve multiport endpoints from pods [Conformance] + test/e2e/network/service.go:848 +STEP: creating service multi-endpoint-test in namespace services-1910 08/24/23 12:56:34.271 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[] 08/24/23 12:56:34.293 +Aug 24 12:56:34.311: INFO: Failed go get Endpoints object: endpoints "multi-endpoint-test" not found +Aug 24 12:56:35.335: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[] +STEP: Creating pod pod1 in namespace services-1910 08/24/23 12:56:35.336 +Aug 24 12:56:35.355: INFO: Waiting up to 5m0s for pod "pod1" in namespace "services-1910" to be "running and ready" +Aug 24 12:56:35.371: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 16.61824ms +Aug 24 12:56:35.372: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:56:37.381: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.026582729s +Aug 24 12:56:37.382: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:56:39.385: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 4.030186977s +Aug 24 12:56:39.385: INFO: The phase of Pod pod1 is Running (Ready = true) +Aug 24 12:56:39.385: INFO: Pod "pod1" satisfied condition "running and ready" +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[pod1:[100]] 08/24/23 12:56:39.396 +Aug 24 12:56:39.439: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[pod1:[100]] +STEP: Creating pod pod2 in namespace services-1910 08/24/23 12:56:39.439 +Aug 24 12:56:39.457: INFO: Waiting up to 5m0s for pod "pod2" in namespace "services-1910" to be "running and ready" +Aug 24 12:56:39.464: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 6.779289ms +Aug 24 12:56:39.464: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 12:56:41.472: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.014749175s +Aug 24 12:56:41.472: INFO: The phase of Pod pod2 is Running (Ready = true) +Aug 24 12:56:41.472: INFO: Pod "pod2" satisfied condition "running and ready" +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[pod1:[100] pod2:[101]] 08/24/23 12:56:41.478 +Aug 24 12:56:41.500: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[pod1:[100] pod2:[101]] +STEP: Checking if the Service forwards traffic to pods 08/24/23 12:56:41.5 +Aug 24 12:56:41.500: INFO: Creating new exec pod +Aug 24 12:56:41.510: INFO: Waiting up to 5m0s for pod "execpodc572b" in namespace "services-1910" to be "running" +Aug 24 12:56:41.519: INFO: Pod "execpodc572b": Phase="Pending", Reason="", readiness=false. Elapsed: 8.682102ms +Aug 24 12:56:43.528: INFO: Pod "execpodc572b": Phase="Running", Reason="", readiness=true. Elapsed: 2.01764322s +Aug 24 12:56:43.528: INFO: Pod "execpodc572b" satisfied condition "running" +Aug 24 12:56:44.532: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1910 exec execpodc572b -- /bin/sh -x -c nc -v -z -w 2 multi-endpoint-test 80' +Aug 24 12:56:44.852: INFO: stderr: "+ nc -v -z -w 2 multi-endpoint-test 80\nConnection to multi-endpoint-test 80 port [tcp/http] succeeded!\n" +Aug 24 12:56:44.852: INFO: stdout: "" +Aug 24 12:56:44.852: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1910 exec execpodc572b -- /bin/sh -x -c nc -v -z -w 2 10.233.15.247 80' +Aug 24 12:56:45.113: INFO: stderr: "+ nc -v -z -w 2 10.233.15.247 80\nConnection to 10.233.15.247 80 port [tcp/http] succeeded!\n" +Aug 24 12:56:45.113: INFO: stdout: "" +Aug 24 12:56:45.113: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1910 exec execpodc572b -- /bin/sh -x -c nc -v -z -w 2 multi-endpoint-test 81' +Aug 24 12:56:45.404: INFO: stderr: "+ nc -v -z -w 2 multi-endpoint-test 81\nConnection to multi-endpoint-test 81 port [tcp/*] succeeded!\n" +Aug 24 12:56:45.404: INFO: stdout: "" +Aug 24 12:56:45.404: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1910 exec execpodc572b -- /bin/sh -x -c nc -v -z -w 2 10.233.15.247 81' +Aug 24 12:56:45.654: INFO: stderr: "+ nc -v -z -w 2 10.233.15.247 81\nConnection to 10.233.15.247 81 port [tcp/*] succeeded!\n" +Aug 24 12:56:45.654: INFO: stdout: "" +STEP: Deleting pod pod1 in namespace services-1910 08/24/23 12:56:45.655 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[pod2:[101]] 08/24/23 12:56:45.691 +Aug 24 12:56:45.790: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[pod2:[101]] +STEP: Deleting pod pod2 in namespace services-1910 08/24/23 12:56:45.79 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[] 08/24/23 12:56:45.858 +Aug 24 12:56:45.902: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[] +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:54.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +Aug 24 12:56:45.955: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] 
Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-3906" for this suite. 07/29/23 16:51:54.6 +STEP: Destroying namespace "services-1910" for this suite. 08/24/23 12:56:45.98 ------------------------------ -• [4.155 seconds] -[sig-storage] EmptyDir volumes -test/e2e/common/storage/framework.go:23 - should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:97 +• [SLOW TEST] [11.802 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should serve multiport endpoints from pods [Conformance] + test/e2e/network/service.go:848 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:50.463 - Jul 29 16:51:50.464: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 16:51:50.467 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:50.495 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:50.498 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 12:56:34.212 + Aug 24 12:56:34.212: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 12:56:34.219 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:34.257 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:34.264 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:97 - STEP: Creating a pod to test emptydir 0644 on tmpfs 07/29/23 16:51:50.502 - Jul 29 16:51:50.518: INFO: Waiting up to 5m0s for pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f" in namespace "emptydir-3906" to be "Succeeded or Failed" - Jul 29 16:51:50.534: INFO: Pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f": Phase="Pending", Reason="", readiness=false. Elapsed: 15.779128ms - Jul 29 16:51:52.540: INFO: Pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021232515s - Jul 29 16:51:54.541: INFO: Pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.022708638s - STEP: Saw pod success 07/29/23 16:51:54.541 - Jul 29 16:51:54.542: INFO: Pod "pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f" satisfied condition "Succeeded or Failed" - Jul 29 16:51:54.547: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f container test-container: - STEP: delete the pod 07/29/23 16:51:54.559 - Jul 29 16:51:54.584: INFO: Waiting for pod pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f to disappear - Jul 29 16:51:54.590: INFO: Pod pod-cd6ee7ee-c84b-4098-becf-c8e16c28936f no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should serve multiport endpoints from pods [Conformance] + test/e2e/network/service.go:848 + STEP: creating service multi-endpoint-test in namespace services-1910 08/24/23 12:56:34.271 + STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[] 08/24/23 12:56:34.293 + Aug 24 12:56:34.311: INFO: Failed go get Endpoints object: endpoints "multi-endpoint-test" not found + Aug 24 12:56:35.335: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[] + STEP: Creating pod pod1 in namespace services-1910 08/24/23 12:56:35.336 + Aug 24 12:56:35.355: INFO: Waiting up to 5m0s for pod "pod1" in namespace "services-1910" to be "running and ready" + Aug 24 12:56:35.371: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 16.61824ms + Aug 24 12:56:35.372: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:56:37.381: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.026582729s + Aug 24 12:56:37.382: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:56:39.385: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 4.030186977s + Aug 24 12:56:39.385: INFO: The phase of Pod pod1 is Running (Ready = true) + Aug 24 12:56:39.385: INFO: Pod "pod1" satisfied condition "running and ready" + STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[pod1:[100]] 08/24/23 12:56:39.396 + Aug 24 12:56:39.439: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[pod1:[100]] + STEP: Creating pod pod2 in namespace services-1910 08/24/23 12:56:39.439 + Aug 24 12:56:39.457: INFO: Waiting up to 5m0s for pod "pod2" in namespace "services-1910" to be "running and ready" + Aug 24 12:56:39.464: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 6.779289ms + Aug 24 12:56:39.464: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 12:56:41.472: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.014749175s + Aug 24 12:56:41.472: INFO: The phase of Pod pod2 is Running (Ready = true) + Aug 24 12:56:41.472: INFO: Pod "pod2" satisfied condition "running and ready" + STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[pod1:[100] pod2:[101]] 08/24/23 12:56:41.478 + Aug 24 12:56:41.500: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[pod1:[100] pod2:[101]] + STEP: Checking if the Service forwards traffic to pods 08/24/23 12:56:41.5 + Aug 24 12:56:41.500: INFO: Creating new exec pod + Aug 24 12:56:41.510: INFO: Waiting up to 5m0s for pod "execpodc572b" in namespace "services-1910" to be "running" + Aug 24 12:56:41.519: INFO: Pod "execpodc572b": Phase="Pending", Reason="", readiness=false. Elapsed: 8.682102ms + Aug 24 12:56:43.528: INFO: Pod "execpodc572b": Phase="Running", Reason="", readiness=true. Elapsed: 2.01764322s + Aug 24 12:56:43.528: INFO: Pod "execpodc572b" satisfied condition "running" + Aug 24 12:56:44.532: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1910 exec execpodc572b -- /bin/sh -x -c nc -v -z -w 2 multi-endpoint-test 80' + Aug 24 12:56:44.852: INFO: stderr: "+ nc -v -z -w 2 multi-endpoint-test 80\nConnection to multi-endpoint-test 80 port [tcp/http] succeeded!\n" + Aug 24 12:56:44.852: INFO: stdout: "" + Aug 24 12:56:44.852: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1910 exec execpodc572b -- /bin/sh -x -c nc -v -z -w 2 10.233.15.247 80' + Aug 24 12:56:45.113: INFO: stderr: "+ nc -v -z -w 2 10.233.15.247 80\nConnection to 10.233.15.247 80 port [tcp/http] succeeded!\n" + Aug 24 12:56:45.113: INFO: stdout: "" + Aug 24 12:56:45.113: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1910 exec execpodc572b -- /bin/sh -x -c nc -v -z -w 2 multi-endpoint-test 81' + Aug 24 12:56:45.404: INFO: stderr: "+ nc -v -z -w 2 multi-endpoint-test 81\nConnection to multi-endpoint-test 81 port [tcp/*] succeeded!\n" + Aug 24 12:56:45.404: INFO: stdout: "" + Aug 24 12:56:45.404: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1910 exec execpodc572b -- /bin/sh -x -c nc -v -z -w 2 10.233.15.247 81' + Aug 24 12:56:45.654: INFO: stderr: "+ nc -v -z -w 2 10.233.15.247 81\nConnection to 10.233.15.247 81 port [tcp/*] succeeded!\n" + Aug 24 12:56:45.654: INFO: stdout: "" + STEP: Deleting pod pod1 in namespace services-1910 08/24/23 12:56:45.655 + STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[pod2:[101]] 08/24/23 12:56:45.691 + Aug 24 12:56:45.790: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[pod2:[101]] + STEP: Deleting pod pod2 in namespace services-1910 08/24/23 12:56:45.79 + STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-1910 to expose endpoints map[] 08/24/23 12:56:45.858 + Aug 24 12:56:45.902: INFO: successfully validated that service multi-endpoint-test in namespace services-1910 exposes endpoints map[] + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:54.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 12:56:45.955: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + 
[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-3906" for this suite. 07/29/23 16:51:54.6 + STEP: Destroying namespace "services-1910" for this suite. 08/24/23 12:56:45.98 << End Captured GinkgoWriter Output ------------------------------ -SSS +SS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should unconditionally reject operations on fail closed webhook [Conformance] - test/e2e/apimachinery/webhook.go:239 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-node] Pods + should patch a pod status [Conformance] + test/e2e/common/node/pods.go:1083 +[BeforeEach] [sig-node] Pods set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:54.62 -Jul 29 16:51:54.620: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 16:51:54.623 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:54.654 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:54.66 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:56:46.015 +Aug 24 12:56:46.015: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 12:56:46.026 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:46.05 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:46.055 +[BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 16:51:54.688 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:51:56.372 -STEP: Deploying the webhook pod 07/29/23 16:51:56.391 -STEP: Wait for the deployment to be ready 07/29/23 16:51:56.414 -Jul 29 16:51:56.427: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -STEP: Deploying the webhook service 07/29/23 16:51:58.446 -STEP: Verifying the service has paired with the endpoint 07/29/23 16:51:58.463 -Jul 29 16:51:59.465: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should unconditionally reject operations on fail closed webhook [Conformance] - test/e2e/apimachinery/webhook.go:239 -STEP: Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API 07/29/23 16:51:59.472 -STEP: create a namespace for the webhook 07/29/23 16:51:59.515 -STEP: create a configmap should be unconditionally rejected by the webhook 07/29/23 16:51:59.537 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 +[It] should patch a pod status [Conformance] + test/e2e/common/node/pods.go:1083 +STEP: Create a pod 08/24/23 12:56:46.064 +Aug 24 12:56:46.114: INFO: Waiting up to 5m0s for pod "pod-4jnpp" in namespace "pods-7894" to be "running" +Aug 24 12:56:46.122: INFO: Pod 
"pod-4jnpp": Phase="Pending", Reason="", readiness=false. Elapsed: 6.526876ms +Aug 24 12:56:48.133: INFO: Pod "pod-4jnpp": Phase="Running", Reason="", readiness=true. Elapsed: 2.017959639s +Aug 24 12:56:48.133: INFO: Pod "pod-4jnpp" satisfied condition "running" +STEP: patching /status 08/24/23 12:56:48.133 +Aug 24 12:56:48.151: INFO: Status Message: "Patched by e2e test" and Reason: "E2E" +[AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 -Jul 29 16:51:59.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 12:56:48.151: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-9775" for this suite. 07/29/23 16:51:59.741 -STEP: Destroying namespace "webhook-9775-markers" for this suite. 07/29/23 16:51:59.753 +STEP: Destroying namespace "pods-7894" for this suite. 08/24/23 12:56:48.164 ------------------------------ -• [SLOW TEST] [5.160 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should unconditionally reject operations on fail closed webhook [Conformance] - test/e2e/apimachinery/webhook.go:239 +• [2.164 seconds] +[sig-node] Pods +test/e2e/common/node/framework.go:23 + should patch a pod status [Conformance] + test/e2e/common/node/pods.go:1083 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Pods set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:54.62 - Jul 29 16:51:54.620: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 16:51:54.623 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:54.654 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:54.66 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:56:46.015 + Aug 24 12:56:46.015: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 12:56:46.026 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:46.05 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:46.055 + [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 16:51:54.688 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:51:56.372 - STEP: Deploying the webhook pod 07/29/23 16:51:56.391 - STEP: Wait for the deployment to be ready 07/29/23 16:51:56.414 - Jul 29 16:51:56.427: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set - STEP: 
Deploying the webhook service 07/29/23 16:51:58.446 - STEP: Verifying the service has paired with the endpoint 07/29/23 16:51:58.463 - Jul 29 16:51:59.465: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should unconditionally reject operations on fail closed webhook [Conformance] - test/e2e/apimachinery/webhook.go:239 - STEP: Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API 07/29/23 16:51:59.472 - STEP: create a namespace for the webhook 07/29/23 16:51:59.515 - STEP: create a configmap should be unconditionally rejected by the webhook 07/29/23 16:51:59.537 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 + [It] should patch a pod status [Conformance] + test/e2e/common/node/pods.go:1083 + STEP: Create a pod 08/24/23 12:56:46.064 + Aug 24 12:56:46.114: INFO: Waiting up to 5m0s for pod "pod-4jnpp" in namespace "pods-7894" to be "running" + Aug 24 12:56:46.122: INFO: Pod "pod-4jnpp": Phase="Pending", Reason="", readiness=false. Elapsed: 6.526876ms + Aug 24 12:56:48.133: INFO: Pod "pod-4jnpp": Phase="Running", Reason="", readiness=true. Elapsed: 2.017959639s + Aug 24 12:56:48.133: INFO: Pod "pod-4jnpp" satisfied condition "running" + STEP: patching /status 08/24/23 12:56:48.133 + Aug 24 12:56:48.151: INFO: Status Message: "Patched by e2e test" and Reason: "E2E" + [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 - Jul 29 16:51:59.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 12:56:48.151: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-9775" for this suite. 07/29/23 16:51:59.741 - STEP: Destroying namespace "webhook-9775-markers" for this suite. 07/29/23 16:51:59.753 + STEP: Destroying namespace "pods-7894" for this suite. 
08/24/23 12:56:48.164 << End Captured GinkgoWriter Output ------------------------------ -S +SSSSSSSS ------------------------------ -[sig-storage] Projected configMap - updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:124 -[BeforeEach] [sig-storage] Projected configMap +[sig-storage] EmptyDir volumes + should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:187 +[BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:51:59.783 -Jul 29 16:51:59.783: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:51:59.787 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:59.824 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:59.83 -[BeforeEach] [sig-storage] Projected configMap +STEP: Creating a kubernetes client 08/24/23 12:56:48.183 +Aug 24 12:56:48.184: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 12:56:48.187 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:48.223 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:48.229 +[BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[It] updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:124 -STEP: Creating projection with configMap that has name projected-configmap-test-upd-d1c4e1ac-68f5-4e04-9489-2cbc83b9c1b3 07/29/23 16:51:59.851 -STEP: Creating the pod 07/29/23 16:51:59.875 -Jul 29 16:51:59.926: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb" in namespace "projected-9234" to be "running and ready" -Jul 29 16:51:59.931: INFO: Pod "pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb": Phase="Pending", Reason="", readiness=false. Elapsed: 4.217508ms -Jul 29 16:51:59.931: INFO: The phase of Pod pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:52:01.938: INFO: Pod "pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb": Phase="Running", Reason="", readiness=true. Elapsed: 2.011194235s -Jul 29 16:52:01.938: INFO: The phase of Pod pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb is Running (Ready = true) -Jul 29 16:52:01.938: INFO: Pod "pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb" satisfied condition "running and ready" -STEP: Updating configmap projected-configmap-test-upd-d1c4e1ac-68f5-4e04-9489-2cbc83b9c1b3 07/29/23 16:52:01.955 -STEP: waiting to observe update in volume 07/29/23 16:52:01.963 -[AfterEach] [sig-storage] Projected configMap +[It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:187 +STEP: Creating a pod to test emptydir 0777 on node default medium 08/24/23 12:56:48.235 +Aug 24 12:56:48.268: INFO: Waiting up to 5m0s for pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e" in namespace "emptydir-9373" to be "Succeeded or Failed" +Aug 24 12:56:48.279: INFO: Pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e": Phase="Pending", Reason="", readiness=false. 
Elapsed: 10.387842ms +Aug 24 12:56:50.292: INFO: Pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02347984s +Aug 24 12:56:52.289: INFO: Pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020246243s +STEP: Saw pod success 08/24/23 12:56:52.289 +Aug 24 12:56:52.290: INFO: Pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e" satisfied condition "Succeeded or Failed" +Aug 24 12:56:52.297: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-9a2b6cb0-2736-44ef-8729-c01a432c853e container test-container: +STEP: delete the pod 08/24/23 12:56:52.307 +Aug 24 12:56:52.331: INFO: Waiting for pod pod-9a2b6cb0-2736-44ef-8729-c01a432c853e to disappear +Aug 24 12:56:52.339: INFO: Pod pod-9a2b6cb0-2736-44ef-8729-c01a432c853e no longer exists +[AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:03.996: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected configMap +Aug 24 12:56:52.339: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "projected-9234" for this suite. 07/29/23 16:52:04.005 +STEP: Destroying namespace "emptydir-9373" for this suite. 08/24/23 12:56:52.351 ------------------------------ -• [4.233 seconds] -[sig-storage] Projected configMap +• [4.183 seconds] +[sig-storage] EmptyDir volumes test/e2e/common/storage/framework.go:23 - updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:124 + should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:187 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected configMap + [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:51:59.783 - Jul 29 16:51:59.783: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:51:59.787 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:51:59.824 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:51:59.83 - [BeforeEach] [sig-storage] Projected configMap + STEP: Creating a kubernetes client 08/24/23 12:56:48.183 + Aug 24 12:56:48.184: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 12:56:48.187 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:48.223 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:48.229 + [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [It] updates should be reflected in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:124 - STEP: Creating projection with configMap that has name projected-configmap-test-upd-d1c4e1ac-68f5-4e04-9489-2cbc83b9c1b3 07/29/23 16:51:59.851 - STEP: Creating the pod 07/29/23 
16:51:59.875 - Jul 29 16:51:59.926: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb" in namespace "projected-9234" to be "running and ready" - Jul 29 16:51:59.931: INFO: Pod "pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb": Phase="Pending", Reason="", readiness=false. Elapsed: 4.217508ms - Jul 29 16:51:59.931: INFO: The phase of Pod pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:52:01.938: INFO: Pod "pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb": Phase="Running", Reason="", readiness=true. Elapsed: 2.011194235s - Jul 29 16:52:01.938: INFO: The phase of Pod pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb is Running (Ready = true) - Jul 29 16:52:01.938: INFO: Pod "pod-projected-configmaps-2b19de3b-3dc0-4898-ad74-978b994149bb" satisfied condition "running and ready" - STEP: Updating configmap projected-configmap-test-upd-d1c4e1ac-68f5-4e04-9489-2cbc83b9c1b3 07/29/23 16:52:01.955 - STEP: waiting to observe update in volume 07/29/23 16:52:01.963 - [AfterEach] [sig-storage] Projected configMap + [It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:187 + STEP: Creating a pod to test emptydir 0777 on node default medium 08/24/23 12:56:48.235 + Aug 24 12:56:48.268: INFO: Waiting up to 5m0s for pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e" in namespace "emptydir-9373" to be "Succeeded or Failed" + Aug 24 12:56:48.279: INFO: Pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e": Phase="Pending", Reason="", readiness=false. Elapsed: 10.387842ms + Aug 24 12:56:50.292: INFO: Pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02347984s + Aug 24 12:56:52.289: INFO: Pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020246243s + STEP: Saw pod success 08/24/23 12:56:52.289 + Aug 24 12:56:52.290: INFO: Pod "pod-9a2b6cb0-2736-44ef-8729-c01a432c853e" satisfied condition "Succeeded or Failed" + Aug 24 12:56:52.297: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-9a2b6cb0-2736-44ef-8729-c01a432c853e container test-container: + STEP: delete the pod 08/24/23 12:56:52.307 + Aug 24 12:56:52.331: INFO: Waiting for pod pod-9a2b6cb0-2736-44ef-8729-c01a432c853e to disappear + Aug 24 12:56:52.339: INFO: Pod pod-9a2b6cb0-2736-44ef-8729-c01a432c853e no longer exists + [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 16:52:03.996: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected configMap + Aug 24 12:56:52.339: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "projected-9234" for this suite. 07/29/23 16:52:04.005 + STEP: Destroying namespace "emptydir-9373" for this suite. 
08/24/23 12:56:52.351 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSS ------------------------------ -[sig-node] Security Context When creating a pod with privileged - should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:528 -[BeforeEach] [sig-node] Security Context +[sig-node] Kubelet when scheduling a busybox command that always fails in a pod + should have an terminated reason [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:110 +[BeforeEach] [sig-node] Kubelet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:04.025 -Jul 29 16:52:04.026: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename security-context-test 07/29/23 16:52:04.028 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:04.055 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:04.062 -[BeforeEach] [sig-node] Security Context +STEP: Creating a kubernetes client 08/24/23 12:56:52.371 +Aug 24 12:56:52.371: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubelet-test 08/24/23 12:56:52.373 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:52.403 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:52.411 +[BeforeEach] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Security Context - test/e2e/common/node/security_context.go:50 -[It] should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:528 -Jul 29 16:52:04.085: INFO: Waiting up to 5m0s for pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6" in namespace "security-context-test-8298" to be "Succeeded or Failed" -Jul 29 16:52:04.098: INFO: Pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6": Phase="Pending", Reason="", readiness=false. Elapsed: 13.382538ms -Jul 29 16:52:06.107: INFO: Pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021900573s -Jul 29 16:52:08.108: INFO: Pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.022991853s -Jul 29 16:52:08.108: INFO: Pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6" satisfied condition "Succeeded or Failed" -Jul 29 16:52:08.124: INFO: Got logs for pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6": "ip: RTNETLINK answers: Operation not permitted\n" -[AfterEach] [sig-node] Security Context +[BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 +[BeforeEach] when scheduling a busybox command that always fails in a pod + test/e2e/common/node/kubelet.go:85 +[It] should have an terminated reason [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:110 +[AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:08.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Security Context +Aug 24 12:56:56.509: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Security Context +[DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Security Context +[DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 -STEP: Destroying namespace "security-context-test-8298" for this suite. 07/29/23 16:52:08.135 +STEP: Destroying namespace "kubelet-test-2668" for this suite. 08/24/23 12:56:56.517 ------------------------------ -• [4.125 seconds] -[sig-node] Security Context +• [4.163 seconds] +[sig-node] Kubelet test/e2e/common/node/framework.go:23 - When creating a pod with privileged - test/e2e/common/node/security_context.go:491 - should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:528 + when scheduling a busybox command that always fails in a pod + test/e2e/common/node/kubelet.go:82 + should have an terminated reason [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:110 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Security Context + [BeforeEach] [sig-node] Kubelet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:52:04.025 - Jul 29 16:52:04.026: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename security-context-test 07/29/23 16:52:04.028 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:04.055 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:04.062 - [BeforeEach] [sig-node] Security Context + STEP: Creating a kubernetes client 08/24/23 12:56:52.371 + Aug 24 12:56:52.371: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubelet-test 08/24/23 12:56:52.373 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:52.403 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:52.411 + [BeforeEach] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Security Context - test/e2e/common/node/security_context.go:50 - [It] should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/node/security_context.go:528 - Jul 29 16:52:04.085: INFO: Waiting up to 5m0s for pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6" in namespace "security-context-test-8298" to be 
"Succeeded or Failed" - Jul 29 16:52:04.098: INFO: Pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6": Phase="Pending", Reason="", readiness=false. Elapsed: 13.382538ms - Jul 29 16:52:06.107: INFO: Pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021900573s - Jul 29 16:52:08.108: INFO: Pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022991853s - Jul 29 16:52:08.108: INFO: Pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6" satisfied condition "Succeeded or Failed" - Jul 29 16:52:08.124: INFO: Got logs for pod "busybox-privileged-false-efd1a839-d614-4bee-8c12-9067134ddbd6": "ip: RTNETLINK answers: Operation not permitted\n" - [AfterEach] [sig-node] Security Context + [BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 + [BeforeEach] when scheduling a busybox command that always fails in a pod + test/e2e/common/node/kubelet.go:85 + [It] should have an terminated reason [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:110 + [AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 - Jul 29 16:52:08.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Security Context + Aug 24 12:56:56.509: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Security Context + [DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Security Context + [DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 - STEP: Destroying namespace "security-context-test-8298" for this suite. 07/29/23 16:52:08.135 + STEP: Destroying namespace "kubelet-test-2668" for this suite. 
08/24/23 12:56:56.517 << End Captured GinkgoWriter Output ------------------------------ SSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] EndpointSlice - should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] - test/e2e/network/endpointslice.go:102 -[BeforeEach] [sig-network] EndpointSlice +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + patching/updating a validating webhook should work [Conformance] + test/e2e/apimachinery/webhook.go:413 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:08.159 -Jul 29 16:52:08.159: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename endpointslice 07/29/23 16:52:08.165 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:08.194 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:08.2 -[BeforeEach] [sig-network] EndpointSlice +STEP: Creating a kubernetes client 08/24/23 12:56:56.546 +Aug 24 12:56:56.546: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 12:56:56.549 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:56.582 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:56.602 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] EndpointSlice - test/e2e/network/endpointslice.go:52 -[It] should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] - test/e2e/network/endpointslice.go:102 -[AfterEach] [sig-network] EndpointSlice +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 12:56:56.642 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:56:57.749 +STEP: Deploying the webhook pod 08/24/23 12:56:57.761 +STEP: Wait for the deployment to be ready 08/24/23 12:56:57.795 +Aug 24 12:56:57.812: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service 08/24/23 12:56:59.832 +STEP: Verifying the service has paired with the endpoint 08/24/23 12:56:59.846 +Aug 24 12:57:00.847: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] patching/updating a validating webhook should work [Conformance] + test/e2e/apimachinery/webhook.go:413 +STEP: Creating a validating webhook configuration 08/24/23 12:57:00.856 +STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 12:57:00.887 +STEP: Updating a validating webhook configuration's rules to not include the create operation 08/24/23 12:57:00.906 +STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 12:57:00.928 +STEP: Patching a validating webhook configuration's rules to include the create operation 08/24/23 12:57:00.948 +STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 12:57:00.96 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:10.312: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] 
[sig-network] EndpointSlice +Aug 24 12:57:00.978: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] EndpointSlice +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] EndpointSlice +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "endpointslice-1497" for this suite. 07/29/23 16:52:10.322 +STEP: Destroying namespace "webhook-9350" for this suite. 08/24/23 12:57:01.104 +STEP: Destroying namespace "webhook-9350-markers" for this suite. 08/24/23 12:57:01.121 ------------------------------ -• [2.180 seconds] -[sig-network] EndpointSlice -test/e2e/network/common/framework.go:23 - should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] - test/e2e/network/endpointslice.go:102 +• [4.594 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + patching/updating a validating webhook should work [Conformance] + test/e2e/apimachinery/webhook.go:413 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] EndpointSlice + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:52:08.159 - Jul 29 16:52:08.159: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename endpointslice 07/29/23 16:52:08.165 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:08.194 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:08.2 - [BeforeEach] [sig-network] EndpointSlice + STEP: Creating a kubernetes client 08/24/23 12:56:56.546 + Aug 24 12:56:56.546: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 12:56:56.549 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:56:56.582 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:56:56.602 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] EndpointSlice - test/e2e/network/endpointslice.go:52 - [It] should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] - test/e2e/network/endpointslice.go:102 - [AfterEach] [sig-network] EndpointSlice + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 12:56:56.642 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 12:56:57.749 + STEP: Deploying the webhook pod 08/24/23 12:56:57.761 + STEP: Wait for the deployment to be ready 08/24/23 12:56:57.795 + Aug 24 12:56:57.812: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set + STEP: Deploying the webhook service 08/24/23 12:56:59.832 + STEP: Verifying the service has paired with the endpoint 08/24/23 12:56:59.846 + Aug 24 
12:57:00.847: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] patching/updating a validating webhook should work [Conformance] + test/e2e/apimachinery/webhook.go:413 + STEP: Creating a validating webhook configuration 08/24/23 12:57:00.856 + STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 12:57:00.887 + STEP: Updating a validating webhook configuration's rules to not include the create operation 08/24/23 12:57:00.906 + STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 12:57:00.928 + STEP: Patching a validating webhook configuration's rules to include the create operation 08/24/23 12:57:00.948 + STEP: Creating a configMap that does not comply to the validation webhook rules 08/24/23 12:57:00.96 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:52:10.312: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] EndpointSlice + Aug 24 12:57:00.978: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] EndpointSlice + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] EndpointSlice + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "endpointslice-1497" for this suite. 07/29/23 16:52:10.322 + STEP: Destroying namespace "webhook-9350" for this suite. 08/24/23 12:57:01.104 + STEP: Destroying namespace "webhook-9350-markers" for this suite. 
08/24/23 12:57:01.121 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ [sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] test/e2e/apps/daemon_set.go:385 [BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:10.34 -Jul 29 16:52:10.340: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename daemonsets 07/29/23 16:52:10.346 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:10.385 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:10.39 +STEP: Creating a kubernetes client 08/24/23 12:57:01.147 +Aug 24 12:57:01.148: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename daemonsets 08/24/23 12:57:01.155 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:01.209 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:01.217 [BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/apps/daemon_set.go:157 [It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] test/e2e/apps/daemon_set.go:385 -Jul 29 16:52:10.441: INFO: Creating simple daemon set daemon-set -STEP: Check that daemon pods launch on every node of the cluster. 07/29/23 16:52:10.452 -Jul 29 16:52:10.471: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:52:10.471: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:52:11.492: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:52:11.492: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1 -Jul 29 16:52:12.501: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 16:52:12.501: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1 -Jul 29 16:52:13.492: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 16:52:13.492: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1 -Jul 29 16:52:14.490: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 -Jul 29 16:52:14.491: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set -STEP: Update daemon pods image. 07/29/23 16:52:14.518 -STEP: Check that daemon pods images are updated. 07/29/23 16:52:14.547 -Jul 29 16:52:14.555: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:14.555: INFO: Wrong image for pod: daemon-set-rfxp8. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:14.555: INFO: Wrong image for pod: daemon-set-v86mm. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:15.608: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:15.608: INFO: Pod daemon-set-ttcdg is not available -Jul 29 16:52:15.608: INFO: Wrong image for pod: daemon-set-v86mm. 
Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:16.609: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:16.609: INFO: Pod daemon-set-ttcdg is not available -Jul 29 16:52:16.610: INFO: Wrong image for pod: daemon-set-v86mm. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:17.614: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:18.613: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:18.613: INFO: Pod daemon-set-k274p is not available -Jul 29 16:52:19.612: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. -Jul 29 16:52:19.612: INFO: Pod daemon-set-k274p is not available -Jul 29 16:52:21.609: INFO: Pod daemon-set-j27tf is not available -STEP: Check that daemon pods are still running on every node of the cluster. 07/29/23 16:52:21.62 -Jul 29 16:52:21.641: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 16:52:21.642: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1 -Jul 29 16:52:22.658: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 -Jul 29 16:52:22.658: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1 -Jul 29 16:52:23.721: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 -Jul 29 16:52:23.721: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set +Aug 24 12:57:01.278: INFO: Creating simple daemon set daemon-set +STEP: Check that daemon pods launch on every node of the cluster. 08/24/23 12:57:01.292 +Aug 24 12:57:01.307: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:57:01.307: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:57:02.339: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:57:02.339: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:57:03.324: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1 +Aug 24 12:57:03.324: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 12:57:04.327: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 +Aug 24 12:57:04.328: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set +STEP: Update daemon pods image. 08/24/23 12:57:04.363 +STEP: Check that daemon pods images are updated. 08/24/23 12:57:04.403 +Aug 24 12:57:04.411: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:04.412: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:04.412: INFO: Wrong image for pod: daemon-set-86vd6. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. 
+Aug 24 12:57:05.434: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:05.434: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:06.438: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:06.438: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:07.434: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:07.434: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:07.434: INFO: Pod daemon-set-nsmmr is not available +Aug 24 12:57:08.439: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:08.439: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:08.440: INFO: Pod daemon-set-nsmmr is not available +Aug 24 12:57:09.433: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:10.435: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:10.435: INFO: Pod daemon-set-6449t is not available +Aug 24 12:57:11.435: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4. +Aug 24 12:57:11.436: INFO: Pod daemon-set-6449t is not available +Aug 24 12:57:13.435: INFO: Pod daemon-set-qb4hw is not available +STEP: Check that daemon pods are still running on every node of the cluster. 
08/24/23 12:57:13.45 +Aug 24 12:57:13.473: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 +Aug 24 12:57:13.474: INFO: Node pe9deep4seen-3 is running 0 daemon pod, expected 1 +Aug 24 12:57:14.492: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 +Aug 24 12:57:14.493: INFO: Node pe9deep4seen-3 is running 0 daemon pod, expected 1 +Aug 24 12:57:15.490: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 +Aug 24 12:57:15.490: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set [AfterEach] [sig-apps] Daemon set [Serial] test/e2e/apps/daemon_set.go:122 -STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:52:23.76 -STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-2523, will wait for the garbage collector to delete the pods 07/29/23 16:52:23.76 -Jul 29 16:52:23.835: INFO: Deleting DaemonSet.extensions daemon-set took: 12.545275ms -Jul 29 16:52:24.036: INFO: Terminating DaemonSet.extensions daemon-set pods took: 201.002812ms -Jul 29 16:52:26.044: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 -Jul 29 16:52:26.045: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set -Jul 29 16:52:26.052: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"34074"},"items":null} +STEP: Deleting DaemonSet "daemon-set" 08/24/23 12:57:15.519 +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-6657, will wait for the garbage collector to delete the pods 08/24/23 12:57:15.52 +Aug 24 12:57:15.590: INFO: Deleting DaemonSet.extensions daemon-set took: 13.635162ms +Aug 24 12:57:15.691: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.908939ms +Aug 24 12:57:17.698: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 12:57:17.698: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set +Aug 24 12:57:17.704: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"32557"},"items":null} -Jul 29 16:52:26.058: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"34074"},"items":null} +Aug 24 12:57:17.709: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"32557"},"items":null} [AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:26.087: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:57:17.740: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "daemonsets-2523" for this suite. 07/29/23 16:52:26.098 +STEP: Destroying namespace "daemonsets-6657" for this suite. 
------------------------------
-• [SLOW TEST] [15.769 seconds]
+• [SLOW TEST] [16.621 seconds]
[sig-apps] Daemon set [Serial]
test/e2e/apps/framework.go:23
  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
@@ -29564,2815 +29458,2906 @@ test/e2e/apps/framework.go:23
  Begin Captured GinkgoWriter Output >>
  [BeforeEach] [sig-apps] Daemon set [Serial]
    set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 16:52:10.34
- Jul 29 16:52:10.340: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename daemonsets 07/29/23 16:52:10.346
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:10.385
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:10.39
+ STEP: Creating a kubernetes client 08/24/23 12:57:01.147
+ Aug 24 12:57:01.148: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename daemonsets 08/24/23 12:57:01.155
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:01.209
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:01.217
  [BeforeEach] [sig-apps] Daemon set [Serial]
    test/e2e/framework/metrics/init/init.go:31
  [BeforeEach] [sig-apps] Daemon set [Serial]
    test/e2e/apps/daemon_set.go:157
  [It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
    test/e2e/apps/daemon_set.go:385
- Jul 29 16:52:10.441: INFO: Creating simple daemon set daemon-set
- STEP: Check that daemon pods launch on every node of the cluster. 07/29/23 16:52:10.452
- Jul 29 16:52:10.471: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
- Jul 29 16:52:10.471: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
- Jul 29 16:52:11.492: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
- Jul 29 16:52:11.492: INFO: Node wetuj3nuajog-1 is running 0 daemon pod, expected 1
- Jul 29 16:52:12.501: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
- Jul 29 16:52:12.501: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
- Jul 29 16:52:13.492: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
- Jul 29 16:52:13.492: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
- Jul 29 16:52:14.490: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
- Jul 29 16:52:14.491: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
- STEP: Update daemon pods image. 07/29/23 16:52:14.518
- STEP: Check that daemon pods images are updated. 07/29/23 16:52:14.547
- Jul 29 16:52:14.555: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:14.555: INFO: Wrong image for pod: daemon-set-rfxp8. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:14.555: INFO: Wrong image for pod: daemon-set-v86mm. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:15.608: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:15.608: INFO: Pod daemon-set-ttcdg is not available
- Jul 29 16:52:15.608: INFO: Wrong image for pod: daemon-set-v86mm. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:16.609: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:16.609: INFO: Pod daemon-set-ttcdg is not available
- Jul 29 16:52:16.610: INFO: Wrong image for pod: daemon-set-v86mm. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:17.614: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:18.613: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:18.613: INFO: Pod daemon-set-k274p is not available
- Jul 29 16:52:19.612: INFO: Wrong image for pod: daemon-set-5kfdx. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
- Jul 29 16:52:19.612: INFO: Pod daemon-set-k274p is not available
- Jul 29 16:52:21.609: INFO: Pod daemon-set-j27tf is not available
- STEP: Check that daemon pods are still running on every node of the cluster. 07/29/23 16:52:21.62
- Jul 29 16:52:21.641: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
- Jul 29 16:52:21.642: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
- Jul 29 16:52:22.658: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
- Jul 29 16:52:22.658: INFO: Node wetuj3nuajog-3 is running 0 daemon pod, expected 1
- Jul 29 16:52:23.721: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
- Jul 29 16:52:23.721: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
+ Aug 24 12:57:01.278: INFO: Creating simple daemon set daemon-set
+ STEP: Check that daemon pods launch on every node of the cluster. 08/24/23 12:57:01.292
+ Aug 24 12:57:01.307: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+ Aug 24 12:57:01.307: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1
+ Aug 24 12:57:02.339: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+ Aug 24 12:57:02.339: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1
+ Aug 24 12:57:03.324: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 1
+ Aug 24 12:57:03.324: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1
+ Aug 24 12:57:04.327: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
+ Aug 24 12:57:04.328: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
+ STEP: Update daemon pods image. 08/24/23 12:57:04.363
+ STEP: Check that daemon pods images are updated. 08/24/23 12:57:04.403
+ Aug 24 12:57:04.411: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:04.412: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:04.412: INFO: Wrong image for pod: daemon-set-86vd6. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:05.434: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:05.434: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:06.438: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:06.438: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:07.434: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:07.434: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:07.434: INFO: Pod daemon-set-nsmmr is not available
+ Aug 24 12:57:08.439: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:08.439: INFO: Wrong image for pod: daemon-set-75pkb. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:08.440: INFO: Pod daemon-set-nsmmr is not available
+ Aug 24 12:57:09.433: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:10.435: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:10.435: INFO: Pod daemon-set-6449t is not available
+ Aug 24 12:57:11.435: INFO: Wrong image for pod: daemon-set-5mbns. Expected: registry.k8s.io/e2e-test-images/agnhost:2.43, got: registry.k8s.io/e2e-test-images/httpd:2.4.38-4.
+ Aug 24 12:57:11.436: INFO: Pod daemon-set-6449t is not available
+ Aug 24 12:57:13.435: INFO: Pod daemon-set-qb4hw is not available
+ STEP: Check that daemon pods are still running on every node of the cluster. 08/24/23 12:57:13.45
+ Aug 24 12:57:13.473: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+ Aug 24 12:57:13.474: INFO: Node pe9deep4seen-3 is running 0 daemon pod, expected 1
+ Aug 24 12:57:14.492: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2
+ Aug 24 12:57:14.493: INFO: Node pe9deep4seen-3 is running 0 daemon pod, expected 1
+ Aug 24 12:57:15.490: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3
+ Aug 24 12:57:15.490: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set
  [AfterEach] [sig-apps] Daemon set [Serial]
    test/e2e/apps/daemon_set.go:122
- STEP: Deleting DaemonSet "daemon-set" 07/29/23 16:52:23.76
- STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-2523, will wait for the garbage collector to delete the pods 07/29/23 16:52:23.76
- Jul 29 16:52:23.835: INFO: Deleting DaemonSet.extensions daemon-set took: 12.545275ms
- Jul 29 16:52:24.036: INFO: Terminating DaemonSet.extensions daemon-set pods took: 201.002812ms
- Jul 29 16:52:26.044: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
- Jul 29 16:52:26.045: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set
- Jul 29 16:52:26.052: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"34074"},"items":null}
+ STEP: Deleting DaemonSet "daemon-set" 08/24/23 12:57:15.519
+ STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-6657, will wait for the garbage collector to delete the pods 08/24/23 12:57:15.52
+ Aug 24 12:57:15.590: INFO: Deleting DaemonSet.extensions daemon-set took: 13.635162ms
+ Aug 24 12:57:15.691: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.908939ms
+ Aug 24 12:57:17.698: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0
+ Aug 24 12:57:17.698: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set
+ Aug 24 12:57:17.704: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"32557"},"items":null}

- Jul 29 16:52:26.058: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"34074"},"items":null}
+ Aug 24 12:57:17.709: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"32557"},"items":null}

  [AfterEach] [sig-apps] Daemon set [Serial]
    test/e2e/framework/node/init/init.go:32
- Jul 29 16:52:26.087: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ Aug 24 12:57:17.740: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
  [DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
    test/e2e/framework/metrics/init/init.go:33
  [DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
    dump namespaces | framework.go:196
  [DeferCleanup (Each)] [sig-apps] Daemon set [Serial]
    tear down framework | framework.go:193
- STEP: Destroying namespace "daemonsets-2523" for this suite. 07/29/23 16:52:26.098
+ STEP: Destroying namespace "daemonsets-6657" for this suite. 08/24/23 12:57:17.751
  << End Captured GinkgoWriter Output
------------------------------
-SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial]
+  validates that NodeSelector is respected if not matching [Conformance]
+  test/e2e/scheduling/predicates.go:443
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  set up framework | framework.go:178
+STEP: Creating a kubernetes client 08/24/23 12:57:17.781
+Aug 24 12:57:17.781: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename sched-pred 08/24/23 12:57:17.784
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:17.817
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:17.823
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  test/e2e/framework/metrics/init/init.go:31
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  test/e2e/scheduling/predicates.go:97
+Aug 24 12:57:17.835: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Aug 24 12:57:17.864: INFO: Waiting for terminating namespaces to be deleted...
+Aug 24 12:57:17.870: INFO:
+Logging pods the apiserver thinks is on node pe9deep4seen-1 before test
+Aug 24 12:57:17.890: INFO: cilium-node-init-wqpdx from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.890: INFO: Container node-init ready: true, restart count 0
+Aug 24 12:57:17.890: INFO: cilium-wpzgb from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.890: INFO: Container cilium-agent ready: true, restart count 0
+Aug 24 12:57:17.890: INFO: coredns-787d4945fb-8jnm5 from kube-system started at 2023-08-24 11:24:04 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.890: INFO: Container coredns ready: true, restart count 0
+Aug 24 12:57:17.891: INFO: coredns-787d4945fb-d76z6 from kube-system started at 2023-08-24 11:24:07 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.891: INFO: Container coredns ready: true, restart count 0
+Aug 24 12:57:17.891: INFO: kube-addon-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.891: INFO: Container kube-addon-manager ready: true, restart count 0
+Aug 24 12:57:17.891: INFO: kube-apiserver-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.891: INFO: Container kube-apiserver ready: true, restart count 0
+Aug 24 12:57:17.891: INFO: kube-controller-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.892: INFO: Container kube-controller-manager ready: true, restart count 0
+Aug 24 12:57:17.892: INFO: kube-proxy-nr5bs from kube-system started at 2023-08-24 11:21:24 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.892: INFO: Container kube-proxy ready: true, restart count 0
+Aug 24 12:57:17.892: INFO: kube-scheduler-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.892: INFO: Container kube-scheduler ready: true, restart count 0
+Aug 24 12:57:17.892: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded)
+Aug 24 12:57:17.892: INFO: Container sonobuoy-worker ready: true, restart count 0
+Aug 24 12:57:17.892: INFO: Container systemd-logs ready: true, restart count 0
+Aug 24 12:57:17.893: INFO:
+Logging pods the apiserver thinks is on node pe9deep4seen-2 before test
+Aug 24 12:57:17.909: INFO: cilium-node-init-95cbk from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.909: INFO: Container node-init ready: true, restart count 0
+Aug 24 12:57:17.909: INFO: cilium-operator-75f7897945-8qqz2 from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.909: INFO: Container cilium-operator ready: true, restart count 0
+Aug 24 12:57:17.909: INFO: cilium-rcknz from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.909: INFO: Container cilium-agent ready: true, restart count 0
+Aug 24 12:57:17.910: INFO: kube-addon-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:37 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.910: INFO: Container kube-addon-manager ready: true, restart count 0
+Aug 24 12:57:17.910: INFO: kube-apiserver-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.910: INFO: Container kube-apiserver ready: true, restart count 0
+Aug 24 12:57:17.910: INFO: kube-controller-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.910: INFO: Container kube-controller-manager ready: true, restart count 0
+Aug 24 12:57:17.910: INFO: kube-proxy-lm2dm from kube-system started at 2023-08-24 11:22:03 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.911: INFO: Container kube-proxy ready: true, restart count 0
+Aug 24 12:57:17.911: INFO: kube-scheduler-pe9deep4seen-2 from kube-system started at 2023-08-24 11:25:19 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.911: INFO: Container kube-scheduler ready: true, restart count 0
+Aug 24 12:57:17.911: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded)
+Aug 24 12:57:17.911: INFO: Container sonobuoy-worker ready: true, restart count 0
+Aug 24 12:57:17.911: INFO: Container systemd-logs ready: true, restart count 0
+Aug 24 12:57:17.911: INFO:
+Logging pods the apiserver thinks is on node pe9deep4seen-3 before test
+Aug 24 12:57:17.926: INFO: cilium-node-init-pdcw9 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.926: INFO: Container node-init ready: true, restart count 0
+Aug 24 12:57:17.926: INFO: cilium-xgc44 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.926: INFO: Container cilium-agent ready: true, restart count 0
+Aug 24 12:57:17.926: INFO: kube-proxy-8vv8d from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.926: INFO: Container kube-proxy ready: true, restart count 0
+Aug 24 12:57:17.927: INFO: sonobuoy from sonobuoy started at 2023-08-24 11:38:19 +0000 UTC (1 container statuses recorded)
+Aug 24 12:57:17.927: INFO: Container kube-sonobuoy ready: true, restart count 0
+Aug 24 12:57:17.927: INFO: sonobuoy-e2e-job-b3f52dde3e8a4a4e from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded)
+Aug 24 12:57:17.927: INFO: Container e2e ready: true, restart count 0
+Aug 24 12:57:17.927: INFO: Container sonobuoy-worker ready: true, restart count 0
+Aug 24 12:57:17.927: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded)
+Aug 24 12:57:17.927: INFO: Container sonobuoy-worker ready: true, restart count 0
+Aug 24 12:57:17.928: INFO: Container systemd-logs ready: true, restart count 0
+[It] validates that NodeSelector is respected if not matching [Conformance]
+  test/e2e/scheduling/predicates.go:443
+STEP: Trying to schedule Pod with nonempty NodeSelector. 08/24/23 12:57:17.928
+STEP: Considering event:
+Type = [Warning], Name = [restricted-pod.177e530ff96d5dcc], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 node(s) didn't match Pod's node affinity/selector. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling..] 08/24/23 12:57:18
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  test/e2e/framework/node/init/init.go:32
+Aug 24 12:57:19.002: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  test/e2e/scheduling/predicates.go:88
+[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+  test/e2e/framework/metrics/init/init.go:33
+[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+  dump namespaces | framework.go:196
+[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial]
+  tear down framework | framework.go:193
+STEP: Destroying namespace "sched-pred-631" for this suite. 08/24/23 12:57:19.016
+------------------------------
+• [1.259 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+test/e2e/scheduling/framework.go:40
+  validates that NodeSelector is respected if not matching [Conformance]
+  test/e2e/scheduling/predicates.go:443
+
+  Begin Captured GinkgoWriter Output >>
+  [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+    set up framework | framework.go:178
+  STEP: Creating a kubernetes client 08/24/23 12:57:17.781
+  Aug 24 12:57:17.781: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+  STEP: Building a namespace api object, basename sched-pred 08/24/23 12:57:17.784
+  STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:17.817
+  STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:17.823
+  [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+    test/e2e/framework/metrics/init/init.go:31
+  [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+    test/e2e/scheduling/predicates.go:97
+  Aug 24 12:57:17.835: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+  Aug 24 12:57:17.864: INFO: Waiting for terminating namespaces to be deleted...
+ Aug 24 12:57:17.870: INFO: + Logging pods the apiserver thinks is on node pe9deep4seen-1 before test + Aug 24 12:57:17.890: INFO: cilium-node-init-wqpdx from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.890: INFO: Container node-init ready: true, restart count 0 + Aug 24 12:57:17.890: INFO: cilium-wpzgb from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.890: INFO: Container cilium-agent ready: true, restart count 0 + Aug 24 12:57:17.890: INFO: coredns-787d4945fb-8jnm5 from kube-system started at 2023-08-24 11:24:04 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.890: INFO: Container coredns ready: true, restart count 0 + Aug 24 12:57:17.891: INFO: coredns-787d4945fb-d76z6 from kube-system started at 2023-08-24 11:24:07 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.891: INFO: Container coredns ready: true, restart count 0 + Aug 24 12:57:17.891: INFO: kube-addon-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.891: INFO: Container kube-addon-manager ready: true, restart count 0 + Aug 24 12:57:17.891: INFO: kube-apiserver-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.891: INFO: Container kube-apiserver ready: true, restart count 0 + Aug 24 12:57:17.891: INFO: kube-controller-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.892: INFO: Container kube-controller-manager ready: true, restart count 0 + Aug 24 12:57:17.892: INFO: kube-proxy-nr5bs from kube-system started at 2023-08-24 11:21:24 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.892: INFO: Container kube-proxy ready: true, restart count 0 + Aug 24 12:57:17.892: INFO: kube-scheduler-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.892: INFO: Container kube-scheduler ready: true, restart count 0 + Aug 24 12:57:17.892: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) + Aug 24 12:57:17.892: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 12:57:17.892: INFO: Container systemd-logs ready: true, restart count 0 + Aug 24 12:57:17.893: INFO: + Logging pods the apiserver thinks is on node pe9deep4seen-2 before test + Aug 24 12:57:17.909: INFO: cilium-node-init-95cbk from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.909: INFO: Container node-init ready: true, restart count 0 + Aug 24 12:57:17.909: INFO: cilium-operator-75f7897945-8qqz2 from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.909: INFO: Container cilium-operator ready: true, restart count 0 + Aug 24 12:57:17.909: INFO: cilium-rcknz from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.909: INFO: Container cilium-agent ready: true, restart count 0 + Aug 24 12:57:17.910: INFO: kube-addon-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:37 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.910: INFO: Container kube-addon-manager ready: true, restart count 0 + Aug 24 12:57:17.910: INFO: kube-apiserver-pe9deep4seen-2 
from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.910: INFO: Container kube-apiserver ready: true, restart count 0 + Aug 24 12:57:17.910: INFO: kube-controller-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.910: INFO: Container kube-controller-manager ready: true, restart count 0 + Aug 24 12:57:17.910: INFO: kube-proxy-lm2dm from kube-system started at 2023-08-24 11:22:03 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.911: INFO: Container kube-proxy ready: true, restart count 0 + Aug 24 12:57:17.911: INFO: kube-scheduler-pe9deep4seen-2 from kube-system started at 2023-08-24 11:25:19 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.911: INFO: Container kube-scheduler ready: true, restart count 0 + Aug 24 12:57:17.911: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) + Aug 24 12:57:17.911: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 12:57:17.911: INFO: Container systemd-logs ready: true, restart count 0 + Aug 24 12:57:17.911: INFO: + Logging pods the apiserver thinks is on node pe9deep4seen-3 before test + Aug 24 12:57:17.926: INFO: cilium-node-init-pdcw9 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.926: INFO: Container node-init ready: true, restart count 0 + Aug 24 12:57:17.926: INFO: cilium-xgc44 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.926: INFO: Container cilium-agent ready: true, restart count 0 + Aug 24 12:57:17.926: INFO: kube-proxy-8vv8d from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.926: INFO: Container kube-proxy ready: true, restart count 0 + Aug 24 12:57:17.927: INFO: sonobuoy from sonobuoy started at 2023-08-24 11:38:19 +0000 UTC (1 container statuses recorded) + Aug 24 12:57:17.927: INFO: Container kube-sonobuoy ready: true, restart count 0 + Aug 24 12:57:17.927: INFO: sonobuoy-e2e-job-b3f52dde3e8a4a4e from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) + Aug 24 12:57:17.927: INFO: Container e2e ready: true, restart count 0 + Aug 24 12:57:17.927: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 12:57:17.927: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) + Aug 24 12:57:17.927: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 12:57:17.928: INFO: Container systemd-logs ready: true, restart count 0 + [It] validates that NodeSelector is respected if not matching [Conformance] + test/e2e/scheduling/predicates.go:443 + STEP: Trying to schedule Pod with nonempty NodeSelector. 08/24/23 12:57:17.928 + STEP: Considering event: + Type = [Warning], Name = [restricted-pod.177e530ff96d5dcc], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 node(s) didn't match Pod's node affinity/selector. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling..] 
08/24/23 12:57:18 + [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/framework/node/init/init.go:32 + Aug 24 12:57:19.002: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/scheduling/predicates.go:88 + [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] + tear down framework | framework.go:193 + STEP: Destroying namespace "sched-pred-631" for this suite. 08/24/23 12:57:19.016 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSS +------------------------------ +[sig-apps] Deployment + deployment should delete old replica sets [Conformance] + test/e2e/apps/deployment.go:122 +[BeforeEach] [sig-apps] Deployment + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:57:19.043 +Aug 24 12:57:19.043: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename deployment 08/24/23 12:57:19.046 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:19.103 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:19.11 +[BeforeEach] [sig-apps] Deployment + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 +[It] deployment should delete old replica sets [Conformance] + test/e2e/apps/deployment.go:122 +Aug 24 12:57:19.134: INFO: Pod name cleanup-pod: Found 0 pods out of 1 +Aug 24 12:57:24.141: INFO: Pod name cleanup-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running 08/24/23 12:57:24.141 +Aug 24 12:57:24.141: INFO: Creating deployment test-cleanup-deployment +STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up 08/24/23 12:57:24.165 +[AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 +Aug 24 12:57:24.195: INFO: Deployment "test-cleanup-deployment": +&Deployment{ObjectMeta:{test-cleanup-deployment deployment-876 08bc5a4f-be9a-4f1e-8d95-37926dc55853 32645 1 2023-08-24 12:57:24 +0000 UTC map[name:cleanup-pod] map[] [] [] [{e2e.test Update apps/v1 2023-08-24 12:57:24 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent 
SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006f32588 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:0,Replicas:0,UpdatedReplicas:0,AvailableReplicas:0,UnavailableReplicas:0,Conditions:[]DeploymentCondition{},ReadyReplicas:0,CollisionCount:nil,},} + +Aug 24 12:57:24.205: INFO: New ReplicaSet "test-cleanup-deployment-7698ff6f6b" of Deployment "test-cleanup-deployment": +&ReplicaSet{ObjectMeta:{test-cleanup-deployment-7698ff6f6b deployment-876 7502dbbe-1d0c-4d3d-8a7d-00489806acc5 32648 1 2023-08-24 12:57:24 +0000 UTC map[name:cleanup-pod pod-template-hash:7698ff6f6b] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-cleanup-deployment 08bc5a4f-be9a-4f1e-8d95-37926dc55853 0xc006f329e7 0xc006f329e8}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:57:24 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"08bc5a4f-be9a-4f1e-8d95-37926dc55853\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 7698ff6f6b,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod-template-hash:7698ff6f6b] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006f32a78 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] 
[]}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:0,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:57:24.205: INFO: All old ReplicaSets of Deployment "test-cleanup-deployment": +Aug 24 12:57:24.205: INFO: &ReplicaSet{ObjectMeta:{test-cleanup-controller deployment-876 0fc0b9cd-2e58-47aa-adda-8fd44c447314 32647 1 2023-08-24 12:57:19 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [{apps/v1 Deployment test-cleanup-deployment 08bc5a4f-be9a-4f1e-8d95-37926dc55853 0xc006f328b7 0xc006f328b8}] [] [{e2e.test Update apps/v1 2023-08-24 12:57:19 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:57:20 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-08-24 12:57:24 +0000 UTC FieldsV1 {"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"08bc5a4f-be9a-4f1e-8d95-37926dc55853\"}":{}}}} }]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc006f32978 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Aug 24 12:57:24.247: INFO: Pod "test-cleanup-controller-zvkcb" is available: +&Pod{ObjectMeta:{test-cleanup-controller-zvkcb test-cleanup-controller- deployment-876 38099be2-a563-4e61-8f02-4142b171c10d 32589 0 2023-08-24 12:57:19 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [{apps/v1 ReplicaSet test-cleanup-controller 0fc0b9cd-2e58-47aa-adda-8fd44c447314 0xc006f33027 0xc006f33028}] [] [{kube-controller-manager Update v1 2023-08-24 12:57:19 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0fc0b9cd-2e58-47aa-adda-8fd44c447314\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:57:20 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.167\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-hs9xt,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hs9xt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodRe
adinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:19 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:20 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:20 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.167,StartTime:2023-08-24 12:57:19 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:57:20 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://9924c9d01f0ff26fe5df3bab4f7b20ce58556748d133f422df1fbaa08d57577a,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.167,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Aug 24 12:57:24.248: INFO: Pod "test-cleanup-deployment-7698ff6f6b-d9l44" is not available: +&Pod{ObjectMeta:{test-cleanup-deployment-7698ff6f6b-d9l44 test-cleanup-deployment-7698ff6f6b- deployment-876 a6b80ce7-6633-4139-8155-3551287e2e45 32651 0 2023-08-24 12:57:24 +0000 UTC map[name:cleanup-pod pod-template-hash:7698ff6f6b] map[] [{apps/v1 ReplicaSet test-cleanup-deployment-7698ff6f6b 7502dbbe-1d0c-4d3d-8a7d-00489806acc5 0xc006f33227 0xc006f33228}] [] [{kube-controller-manager Update v1 2023-08-24 12:57:24 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"7502dbbe-1d0c-4d3d-8a7d-00489806acc5\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-xdmlr,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xdmlr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},Resou
rceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:24 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + test/e2e/framework/node/init/init.go:32 +Aug 24 12:57:24.249: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Deployment + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-apps] Deployment + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-apps] Deployment + tear down framework | framework.go:193 +STEP: Destroying namespace "deployment-876" for this suite. 08/24/23 12:57:24.267 +------------------------------ +• [SLOW TEST] [5.239 seconds] +[sig-apps] Deployment +test/e2e/apps/framework.go:23 + deployment should delete old replica sets [Conformance] + test/e2e/apps/deployment.go:122 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-apps] Deployment + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:57:19.043 + Aug 24 12:57:19.043: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename deployment 08/24/23 12:57:19.046 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:19.103 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:19.11 + [BeforeEach] [sig-apps] Deployment + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:91 + [It] deployment should delete old replica sets [Conformance] + test/e2e/apps/deployment.go:122 + Aug 24 12:57:19.134: INFO: Pod name cleanup-pod: Found 0 pods out of 1 + Aug 24 12:57:24.141: INFO: Pod name cleanup-pod: Found 1 pods out of 1 + STEP: ensuring each pod is running 08/24/23 12:57:24.141 + Aug 24 12:57:24.141: INFO: Creating deployment test-cleanup-deployment + STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up 08/24/23 12:57:24.165 + [AfterEach] [sig-apps] Deployment + test/e2e/apps/deployment.go:84 + Aug 24 12:57:24.195: INFO: Deployment "test-cleanup-deployment": + &Deployment{ObjectMeta:{test-cleanup-deployment deployment-876 08bc5a4f-be9a-4f1e-8d95-37926dc55853 32645 1 2023-08-24 12:57:24 +0000 UTC map[name:cleanup-pod] map[] [] [] [{e2e.test Update apps/v1 2023-08-24 12:57:24 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC 
map[name:cleanup-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006f32588 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:0,Replicas:0,UpdatedReplicas:0,AvailableReplicas:0,UnavailableReplicas:0,Conditions:[]DeploymentCondition{},ReadyReplicas:0,CollisionCount:nil,},} + + Aug 24 12:57:24.205: INFO: New ReplicaSet "test-cleanup-deployment-7698ff6f6b" of Deployment "test-cleanup-deployment": + &ReplicaSet{ObjectMeta:{test-cleanup-deployment-7698ff6f6b deployment-876 7502dbbe-1d0c-4d3d-8a7d-00489806acc5 32648 1 2023-08-24 12:57:24 +0000 UTC map[name:cleanup-pod pod-template-hash:7698ff6f6b] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-cleanup-deployment 08bc5a4f-be9a-4f1e-8d95-37926dc55853 0xc006f329e7 0xc006f329e8}] [] [{kube-controller-manager Update apps/v1 2023-08-24 12:57:24 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"08bc5a4f-be9a-4f1e-8d95-37926dc55853\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 7698ff6f6b,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod-template-hash:7698ff6f6b] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006f32a78 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:0,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:57:24.205: INFO: All old ReplicaSets of Deployment "test-cleanup-deployment": + Aug 24 12:57:24.205: INFO: &ReplicaSet{ObjectMeta:{test-cleanup-controller deployment-876 0fc0b9cd-2e58-47aa-adda-8fd44c447314 32647 1 2023-08-24 12:57:19 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [{apps/v1 Deployment test-cleanup-deployment 08bc5a4f-be9a-4f1e-8d95-37926dc55853 0xc006f328b7 0xc006f328b8}] [] [{e2e.test Update apps/v1 2023-08-24 12:57:19 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-08-24 12:57:20 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-08-24 12:57:24 +0000 UTC FieldsV1 {"f:metadata":{"f:ownerReferences":{".":{},"k:{\"uid\":\"08bc5a4f-be9a-4f1e-8d95-37926dc55853\"}":{}}}} }]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc006f32978 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} + Aug 24 12:57:24.247: INFO: Pod "test-cleanup-controller-zvkcb" is available: + &Pod{ObjectMeta:{test-cleanup-controller-zvkcb test-cleanup-controller- deployment-876 38099be2-a563-4e61-8f02-4142b171c10d 32589 0 2023-08-24 12:57:19 +0000 UTC map[name:cleanup-pod pod:httpd] map[] [{apps/v1 ReplicaSet test-cleanup-controller 0fc0b9cd-2e58-47aa-adda-8fd44c447314 0xc006f33027 0xc006f33028}] [] [{kube-controller-manager Update v1 2023-08-24 12:57:19 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0fc0b9cd-2e58-47aa-adda-8fd44c447314\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-08-24 12:57:20 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.167\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-hs9xt,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hs9xt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:
nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:19 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:20 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:20 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.130,PodIP:10.233.66.167,StartTime:2023-08-24 12:57:19 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-08-24 12:57:20 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://9924c9d01f0ff26fe5df3bab4f7b20ce58556748d133f422df1fbaa08d57577a,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.167,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + Aug 24 12:57:24.248: INFO: Pod "test-cleanup-deployment-7698ff6f6b-d9l44" is not available: + &Pod{ObjectMeta:{test-cleanup-deployment-7698ff6f6b-d9l44 test-cleanup-deployment-7698ff6f6b- deployment-876 a6b80ce7-6633-4139-8155-3551287e2e45 32651 0 2023-08-24 12:57:24 +0000 UTC map[name:cleanup-pod pod-template-hash:7698ff6f6b] map[] [{apps/v1 ReplicaSet test-cleanup-deployment-7698ff6f6b 7502dbbe-1d0c-4d3d-8a7d-00489806acc5 0xc006f33227 0xc006f33228}] [] [{kube-controller-manager Update v1 2023-08-24 12:57:24 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"7502dbbe-1d0c-4d3d-8a7d-00489806acc5\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-xdmlr,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xdmlr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:pe9deep4seen-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},Resou
rceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-08-24 12:57:24 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} + [AfterEach] [sig-apps] Deployment + test/e2e/framework/node/init/init.go:32 + Aug 24 12:57:24.249: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Deployment + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-apps] Deployment + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-apps] Deployment + tear down framework | framework.go:193 + STEP: Destroying namespace "deployment-876" for this suite. 08/24/23 12:57:24.267 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:84 +[BeforeEach] [sig-storage] Projected downwardAPI + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 12:57:24.286 +Aug 24 12:57:24.286: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 12:57:24.288 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:24.312 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:24.316 +[BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 +[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:84 +STEP: Creating a pod to test downward API volume plugin 08/24/23 12:57:24.321 +Aug 24 12:57:24.336: INFO: Waiting up to 5m0s for pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514" in namespace "projected-5559" to be "Succeeded or Failed" +Aug 24 12:57:24.347: INFO: Pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514": Phase="Pending", Reason="", readiness=false. Elapsed: 10.869105ms +Aug 24 12:57:26.354: INFO: Pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017987536s +Aug 24 12:57:28.355: INFO: Pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01932651s +STEP: Saw pod success 08/24/23 12:57:28.356 +Aug 24 12:57:28.356: INFO: Pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514" satisfied condition "Succeeded or Failed" +Aug 24 12:57:28.362: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514 container client-container: +STEP: delete the pod 08/24/23 12:57:28.374 +Aug 24 12:57:28.404: INFO: Waiting for pod downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514 to disappear +Aug 24 12:57:28.410: INFO: Pod downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + test/e2e/framework/node/init/init.go:32 +Aug 24 12:57:28.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI + tear down framework | framework.go:193 +STEP: Destroying namespace "projected-5559" for this suite. 08/24/23 12:57:28.421 +------------------------------ +• [4.148 seconds] +[sig-storage] Projected downwardAPI +test/e2e/common/storage/framework.go:23 + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:84 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-storage] Projected downwardAPI + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 12:57:24.286 + Aug 24 12:57:24.286: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 12:57:24.288 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:24.312 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:24.316 + [BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/framework/metrics/init/init.go:31 + [BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 + [It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:84 + STEP: Creating a pod to test downward API volume plugin 08/24/23 12:57:24.321 + Aug 24 12:57:24.336: INFO: Waiting up to 5m0s for pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514" in namespace "projected-5559" to be "Succeeded or Failed" + Aug 24 12:57:24.347: INFO: Pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514": Phase="Pending", Reason="", readiness=false. Elapsed: 10.869105ms + Aug 24 12:57:26.354: INFO: Pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017987536s + Aug 24 12:57:28.355: INFO: Pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01932651s + STEP: Saw pod success 08/24/23 12:57:28.356 + Aug 24 12:57:28.356: INFO: Pod "downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514" satisfied condition "Succeeded or Failed" + Aug 24 12:57:28.362: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514 container client-container: + STEP: delete the pod 08/24/23 12:57:28.374 + Aug 24 12:57:28.404: INFO: Waiting for pod downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514 to disappear + Aug 24 12:57:28.410: INFO: Pod downwardapi-volume-59e09f6d-8fd7-44a8-a9bb-c988d4aa7514 no longer exists + [AfterEach] [sig-storage] Projected downwardAPI + test/e2e/framework/node/init/init.go:32 + Aug 24 12:57:28.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + tear down framework | framework.go:193 + STEP: Destroying namespace "projected-5559" for this suite. 08/24/23 12:57:28.421 + << End Captured GinkgoWriter Output +------------------------------ +SSSSS ------------------------------ [sig-apps] ReplicaSet - should serve a basic image on each replica with a public image [Conformance] - test/e2e/apps/replica_set.go:111 + Replace and Patch tests [Conformance] + test/e2e/apps/replica_set.go:154 [BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:26.119 -Jul 29 16:52:26.119: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replicaset 07/29/23 16:52:26.121 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:26.149 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:26.153 +STEP: Creating a kubernetes client 08/24/23 12:57:28.439 +Aug 24 12:57:28.439: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replicaset 08/24/23 12:57:28.441 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:28.479 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:28.485 [BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 -[It] should serve a basic image on each replica with a public image [Conformance] - test/e2e/apps/replica_set.go:111 -Jul 29 16:52:26.158: INFO: Creating ReplicaSet my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6 -Jul 29 16:52:26.175: INFO: Pod name my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6: Found 0 pods out of 1 -Jul 29 16:52:31.189: INFO: Pod name my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6: Found 1 pods out of 1 -Jul 29 16:52:31.189: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6" is running -Jul 29 16:52:31.189: INFO: Waiting up to 5m0s for pod "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf" in namespace "replicaset-7076" to be "running" -Jul 29 16:52:31.197: INFO: Pod "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf": Phase="Running", Reason="", readiness=true. 
Elapsed: 8.37251ms -Jul 29 16:52:31.198: INFO: Pod "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf" satisfied condition "running" -Jul 29 16:52:31.198: INFO: Pod "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:52:26 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:52:27 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:52:27 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:52:26 +0000 UTC Reason: Message:}]) -Jul 29 16:52:31.198: INFO: Trying to dial the pod -Jul 29 16:52:36.226: INFO: Controller my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6: Got expected result from replica 1 [my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf]: "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf", 1 of 1 required successes so far +[It] Replace and Patch tests [Conformance] + test/e2e/apps/replica_set.go:154 +Aug 24 12:57:28.514: INFO: Pod name sample-pod: Found 0 pods out of 1 +Aug 24 12:57:33.526: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running 08/24/23 12:57:33.526 +STEP: Scaling up "test-rs" replicaset 08/24/23 12:57:33.527 +Aug 24 12:57:33.548: INFO: Updating replica set "test-rs" +STEP: patching the ReplicaSet 08/24/23 12:57:33.548 +W0824 12:57:33.563843 14 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" +Aug 24 12:57:33.579: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 1, AvailableReplicas 1 +Aug 24 12:57:33.631: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 1, AvailableReplicas 1 +Aug 24 12:57:33.656: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 1, AvailableReplicas 1 +Aug 24 12:57:33.724: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 1, AvailableReplicas 1 +Aug 24 12:57:35.022: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 2, AvailableReplicas 2 +Aug 24 12:57:35.843: INFO: observed Replicaset test-rs in namespace replicaset-4058 with ReadyReplicas 3 found true [AfterEach] [sig-apps] ReplicaSet test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:36.227: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 12:57:35.843: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 -STEP: Destroying namespace "replicaset-7076" for this suite. 07/29/23 16:52:36.239 +STEP: Destroying namespace "replicaset-4058" for this suite. 
08/24/23 12:57:35.857 ------------------------------ -• [SLOW TEST] [10.146 seconds] +• [SLOW TEST] [7.449 seconds] [sig-apps] ReplicaSet test/e2e/apps/framework.go:23 - should serve a basic image on each replica with a public image [Conformance] - test/e2e/apps/replica_set.go:111 + Replace and Patch tests [Conformance] + test/e2e/apps/replica_set.go:154 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:52:26.119 - Jul 29 16:52:26.119: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replicaset 07/29/23 16:52:26.121 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:26.149 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:26.153 + STEP: Creating a kubernetes client 08/24/23 12:57:28.439 + Aug 24 12:57:28.439: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replicaset 08/24/23 12:57:28.441 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:28.479 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:28.485 [BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 - [It] should serve a basic image on each replica with a public image [Conformance] - test/e2e/apps/replica_set.go:111 - Jul 29 16:52:26.158: INFO: Creating ReplicaSet my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6 - Jul 29 16:52:26.175: INFO: Pod name my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6: Found 0 pods out of 1 - Jul 29 16:52:31.189: INFO: Pod name my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6: Found 1 pods out of 1 - Jul 29 16:52:31.189: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6" is running - Jul 29 16:52:31.189: INFO: Waiting up to 5m0s for pod "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf" in namespace "replicaset-7076" to be "running" - Jul 29 16:52:31.197: INFO: Pod "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf": Phase="Running", Reason="", readiness=true. 
Elapsed: 8.37251ms - Jul 29 16:52:31.198: INFO: Pod "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf" satisfied condition "running" - Jul 29 16:52:31.198: INFO: Pod "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:52:26 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:52:27 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:52:27 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2023-07-29 16:52:26 +0000 UTC Reason: Message:}]) - Jul 29 16:52:31.198: INFO: Trying to dial the pod - Jul 29 16:52:36.226: INFO: Controller my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6: Got expected result from replica 1 [my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf]: "my-hostname-basic-9a233407-f63a-4a3d-a624-81dbe472ebf6-qfjwf", 1 of 1 required successes so far + [It] Replace and Patch tests [Conformance] + test/e2e/apps/replica_set.go:154 + Aug 24 12:57:28.514: INFO: Pod name sample-pod: Found 0 pods out of 1 + Aug 24 12:57:33.526: INFO: Pod name sample-pod: Found 1 pods out of 1 + STEP: ensuring each pod is running 08/24/23 12:57:33.526 + STEP: Scaling up "test-rs" replicaset 08/24/23 12:57:33.527 + Aug 24 12:57:33.548: INFO: Updating replica set "test-rs" + STEP: patching the ReplicaSet 08/24/23 12:57:33.548 + W0824 12:57:33.563843 14 warnings.go:70] unknown field "spec.template.spec.TerminationGracePeriodSeconds" + Aug 24 12:57:33.579: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 1, AvailableReplicas 1 + Aug 24 12:57:33.631: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 1, AvailableReplicas 1 + Aug 24 12:57:33.656: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 1, AvailableReplicas 1 + Aug 24 12:57:33.724: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 1, AvailableReplicas 1 + Aug 24 12:57:35.022: INFO: observed ReplicaSet test-rs in namespace replicaset-4058 with ReadyReplicas 2, AvailableReplicas 2 + Aug 24 12:57:35.843: INFO: observed Replicaset test-rs in namespace replicaset-4058 with ReadyReplicas 3 found true [AfterEach] [sig-apps] ReplicaSet test/e2e/framework/node/init/init.go:32 - Jul 29 16:52:36.227: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 12:57:35.843: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 - STEP: Destroying namespace "replicaset-7076" for this suite. 07/29/23 16:52:36.239 + STEP: Destroying namespace "replicaset-4058" for this suite. 
08/24/23 12:57:35.857 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-architecture] Conformance Tests - should have at least two untainted nodes [Conformance] - test/e2e/architecture/conformance.go:38 -[BeforeEach] [sig-architecture] Conformance Tests +[sig-cli] Kubectl client Kubectl cluster-info + should check if Kubernetes control plane services is included in cluster-info [Conformance] + test/e2e/kubectl/kubectl.go:1250 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:36.275 -Jul 29 16:52:36.275: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename conformance-tests 07/29/23 16:52:36.277 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:36.313 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:36.319 -[BeforeEach] [sig-architecture] Conformance Tests +STEP: Creating a kubernetes client 08/24/23 12:57:35.892 +Aug 24 12:57:35.893: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 12:57:35.894 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:35.927 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:35.931 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[It] should have at least two untainted nodes [Conformance] - test/e2e/architecture/conformance.go:38 -STEP: Getting node addresses 07/29/23 16:52:36.323 -Jul 29 16:52:36.324: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable -[AfterEach] [sig-architecture] Conformance Tests +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[It] should check if Kubernetes control plane services is included in cluster-info [Conformance] + test/e2e/kubectl/kubectl.go:1250 +STEP: validating cluster-info 08/24/23 12:57:35.936 +Aug 24 12:57:35.936: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-4945 cluster-info' +Aug 24 12:57:36.098: INFO: stderr: "" +Aug 24 12:57:36.098: INFO: stdout: "\x1b[0;32mKubernetes control plane\x1b[0m is running at \x1b[0;33mhttps://10.233.0.1:443\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:36.335: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-architecture] Conformance Tests +Aug 24 12:57:36.098: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-architecture] Conformance Tests +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-architecture] Conformance Tests +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "conformance-tests-7631" for this suite. 07/29/23 16:52:36.348 +STEP: Destroying namespace "kubectl-4945" for this suite. 
08/24/23 12:57:36.107 ------------------------------ -• [0.087 seconds] -[sig-architecture] Conformance Tests -test/e2e/architecture/framework.go:23 - should have at least two untainted nodes [Conformance] - test/e2e/architecture/conformance.go:38 +• [0.229 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Kubectl cluster-info + test/e2e/kubectl/kubectl.go:1244 + should check if Kubernetes control plane services is included in cluster-info [Conformance] + test/e2e/kubectl/kubectl.go:1250 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-architecture] Conformance Tests + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:52:36.275 - Jul 29 16:52:36.275: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename conformance-tests 07/29/23 16:52:36.277 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:36.313 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:36.319 - [BeforeEach] [sig-architecture] Conformance Tests + STEP: Creating a kubernetes client 08/24/23 12:57:35.892 + Aug 24 12:57:35.893: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 12:57:35.894 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:35.927 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:35.931 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [It] should have at least two untainted nodes [Conformance] - test/e2e/architecture/conformance.go:38 - STEP: Getting node addresses 07/29/23 16:52:36.323 - Jul 29 16:52:36.324: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable - [AfterEach] [sig-architecture] Conformance Tests + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [It] should check if Kubernetes control plane services is included in cluster-info [Conformance] + test/e2e/kubectl/kubectl.go:1250 + STEP: validating cluster-info 08/24/23 12:57:35.936 + Aug 24 12:57:35.936: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-4945 cluster-info' + Aug 24 12:57:36.098: INFO: stderr: "" + Aug 24 12:57:36.098: INFO: stdout: "\x1b[0;32mKubernetes control plane\x1b[0m is running at \x1b[0;33mhttps://10.233.0.1:443\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 16:52:36.335: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-architecture] Conformance Tests + Aug 24 12:57:36.098: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-architecture] Conformance Tests + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-architecture] Conformance Tests + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "conformance-tests-7631" for this suite. 07/29/23 16:52:36.348 + STEP: Destroying namespace "kubectl-4945" for this suite. 
08/24/23 12:57:36.107 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should include webhook resources in discovery documents [Conformance] - test/e2e/apimachinery/webhook.go:117 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-network] DNS + should provide /etc/hosts entries for the cluster [Conformance] + test/e2e/network/dns.go:117 +[BeforeEach] [sig-network] DNS set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:36.367 -Jul 29 16:52:36.367: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 16:52:36.369 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:36.401 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:36.408 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:57:36.125 +Aug 24 12:57:36.126: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename dns 08/24/23 12:57:36.128 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:36.155 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:36.16 +[BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 16:52:36.45 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:52:37.699 -STEP: Deploying the webhook pod 07/29/23 16:52:37.715 -STEP: Wait for the deployment to be ready 07/29/23 16:52:37.73 -Jul 29 16:52:37.746: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -STEP: Deploying the webhook service 07/29/23 16:52:39.766 -STEP: Verifying the service has paired with the endpoint 07/29/23 16:52:39.782 -Jul 29 16:52:40.783: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should include webhook resources in discovery documents [Conformance] - test/e2e/apimachinery/webhook.go:117 -STEP: fetching the /apis discovery document 07/29/23 16:52:40.793 -STEP: finding the admissionregistration.k8s.io API group in the /apis discovery document 07/29/23 16:52:40.796 -STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis discovery document 07/29/23 16:52:40.796 -STEP: fetching the /apis/admissionregistration.k8s.io discovery document 07/29/23 16:52:40.796 -STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis/admissionregistration.k8s.io discovery document 07/29/23 16:52:40.798 -STEP: fetching the /apis/admissionregistration.k8s.io/v1 discovery document 07/29/23 16:52:40.799 -STEP: finding mutatingwebhookconfigurations and validatingwebhookconfigurations resources in the /apis/admissionregistration.k8s.io/v1 discovery document 07/29/23 16:52:40.801 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[It] should provide /etc/hosts entries for the cluster [Conformance] + test/e2e/network/dns.go:117 +STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-7918.svc.cluster.local)" && echo OK > 
/results/wheezy_hosts@dns-querier-1.dns-test-service.dns-7918.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;sleep 1; done + 08/24/23 12:57:36.165 +STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-7918.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-7918.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;sleep 1; done + 08/24/23 12:57:36.166 +STEP: creating a pod to probe /etc/hosts 08/24/23 12:57:36.166 +STEP: submitting the pod to kubernetes 08/24/23 12:57:36.166 +Aug 24 12:57:36.182: INFO: Waiting up to 15m0s for pod "dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5" in namespace "dns-7918" to be "running" +Aug 24 12:57:36.196: INFO: Pod "dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5": Phase="Pending", Reason="", readiness=false. Elapsed: 14.026712ms +Aug 24 12:57:38.204: INFO: Pod "dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5": Phase="Running", Reason="", readiness=true. Elapsed: 2.022162251s +Aug 24 12:57:38.204: INFO: Pod "dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5" satisfied condition "running" +STEP: retrieving the pod 08/24/23 12:57:38.204 +STEP: looking for the results for each expected name from probers 08/24/23 12:57:38.21 +Aug 24 12:57:38.242: INFO: DNS probes using dns-7918/dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5 succeeded + +STEP: deleting the pod 08/24/23 12:57:38.243 +[AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:40.801: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 12:57:38.266: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-5516" for this suite. 07/29/23 16:52:40.905 -STEP: Destroying namespace "webhook-5516-markers" for this suite. 07/29/23 16:52:40.922 +STEP: Destroying namespace "dns-7918" for this suite. 
08/24/23 12:57:38.282 ------------------------------ -• [4.578 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should include webhook resources in discovery documents [Conformance] - test/e2e/apimachinery/webhook.go:117 +• [2.176 seconds] +[sig-network] DNS +test/e2e/network/common/framework.go:23 + should provide /etc/hosts entries for the cluster [Conformance] + test/e2e/network/dns.go:117 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-network] DNS set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:52:36.367 - Jul 29 16:52:36.367: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 16:52:36.369 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:36.401 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:36.408 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:57:36.125 + Aug 24 12:57:36.126: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename dns 08/24/23 12:57:36.128 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:36.155 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:36.16 + [BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 16:52:36.45 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:52:37.699 - STEP: Deploying the webhook pod 07/29/23 16:52:37.715 - STEP: Wait for the deployment to be ready 07/29/23 16:52:37.73 - Jul 29 16:52:37.746: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set - STEP: Deploying the webhook service 07/29/23 16:52:39.766 - STEP: Verifying the service has paired with the endpoint 07/29/23 16:52:39.782 - Jul 29 16:52:40.783: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should include webhook resources in discovery documents [Conformance] - test/e2e/apimachinery/webhook.go:117 - STEP: fetching the /apis discovery document 07/29/23 16:52:40.793 - STEP: finding the admissionregistration.k8s.io API group in the /apis discovery document 07/29/23 16:52:40.796 - STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis discovery document 07/29/23 16:52:40.796 - STEP: fetching the /apis/admissionregistration.k8s.io discovery document 07/29/23 16:52:40.796 - STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis/admissionregistration.k8s.io discovery document 07/29/23 16:52:40.798 - STEP: fetching the /apis/admissionregistration.k8s.io/v1 discovery document 07/29/23 16:52:40.799 - STEP: finding mutatingwebhookconfigurations and validatingwebhookconfigurations resources in the /apis/admissionregistration.k8s.io/v1 discovery document 07/29/23 16:52:40.801 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [It] should provide /etc/hosts entries for the cluster [Conformance] + test/e2e/network/dns.go:117 + STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts 
dns-querier-1.dns-test-service.dns-7918.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-7918.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;sleep 1; done + 08/24/23 12:57:36.165 + STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-7918.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-7918.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;sleep 1; done + 08/24/23 12:57:36.166 + STEP: creating a pod to probe /etc/hosts 08/24/23 12:57:36.166 + STEP: submitting the pod to kubernetes 08/24/23 12:57:36.166 + Aug 24 12:57:36.182: INFO: Waiting up to 15m0s for pod "dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5" in namespace "dns-7918" to be "running" + Aug 24 12:57:36.196: INFO: Pod "dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5": Phase="Pending", Reason="", readiness=false. Elapsed: 14.026712ms + Aug 24 12:57:38.204: INFO: Pod "dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5": Phase="Running", Reason="", readiness=true. Elapsed: 2.022162251s + Aug 24 12:57:38.204: INFO: Pod "dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5" satisfied condition "running" + STEP: retrieving the pod 08/24/23 12:57:38.204 + STEP: looking for the results for each expected name from probers 08/24/23 12:57:38.21 + Aug 24 12:57:38.242: INFO: DNS probes using dns-7918/dns-test-ec226304-1c01-4c09-b5b0-0c2cfa2d52a5 succeeded + + STEP: deleting the pod 08/24/23 12:57:38.243 + [AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 - Jul 29 16:52:40.801: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 12:57:38.266: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-5516" for this suite. 07/29/23 16:52:40.905 - STEP: Destroying namespace "webhook-5516-markers" for this suite. 07/29/23 16:52:40.922 + STEP: Destroying namespace "dns-7918" for this suite. 
08/24/23 12:57:38.282 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Garbage collector - should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] - test/e2e/apimachinery/garbage_collector.go:550 -[BeforeEach] [sig-api-machinery] Garbage collector +[sig-apps] ReplicaSet + Replicaset should have a working scale subresource [Conformance] + test/e2e/apps/replica_set.go:143 +[BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:40.958 -Jul 29 16:52:40.958: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename gc 07/29/23 16:52:40.96 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:41 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:41.005 -[BeforeEach] [sig-api-machinery] Garbage collector +STEP: Creating a kubernetes client 08/24/23 12:57:38.303 +Aug 24 12:57:38.303: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replicaset 08/24/23 12:57:38.304 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:38.331 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:38.336 +[BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 -[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] - test/e2e/apimachinery/garbage_collector.go:550 -STEP: create the deployment 07/29/23 16:52:41.01 -STEP: Wait for the Deployment to create new ReplicaSet 07/29/23 16:52:41.02 -STEP: delete the deployment 07/29/23 16:52:41.556 -STEP: wait for deployment deletion to see if the garbage collector mistakenly deletes the rs 07/29/23 16:52:41.633 -STEP: Gathering metrics 07/29/23 16:52:42.231 -Jul 29 16:52:42.285: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" -Jul 29 16:52:42.297: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 11.964687ms -Jul 29 16:52:42.297: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) -Jul 29 16:52:42.297: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" -Jul 29 16:52:42.491: INFO: For apiserver_request_total: -For apiserver_request_latency_seconds: -For apiserver_init_events_total: -For garbage_collector_attempt_to_delete_queue_latency: -For garbage_collector_attempt_to_delete_work_duration: -For garbage_collector_attempt_to_orphan_queue_latency: -For garbage_collector_attempt_to_orphan_work_duration: -For garbage_collector_dirty_processing_latency_microseconds: -For garbage_collector_event_processing_latency_microseconds: -For garbage_collector_graph_changes_queue_latency: -For garbage_collector_graph_changes_work_duration: -For garbage_collector_orphan_processing_latency_microseconds: -For namespace_queue_latency: -For namespace_queue_latency_sum: -For namespace_queue_latency_count: -For namespace_retries: -For namespace_work_duration: -For namespace_work_duration_sum: -For namespace_work_duration_count: -For function_duration_seconds: -For errors_total: -For evicted_pods_total: - -[AfterEach] [sig-api-machinery] Garbage collector +[It] Replicaset should have a working scale subresource [Conformance] + test/e2e/apps/replica_set.go:143 +STEP: Creating replica set "test-rs" that asks for more than the allowed pod quota 08/24/23 12:57:38.34 +Aug 24 12:57:38.372: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running 08/24/23 12:57:38.372 +Aug 24 12:57:38.372: INFO: Waiting up to 5m0s for pod "test-rs-7zh6g" in namespace "replicaset-7669" to be "running" +Aug 24 12:57:38.407: INFO: Pod "test-rs-7zh6g": Phase="Pending", Reason="", readiness=false. Elapsed: 34.614979ms +Aug 24 12:57:40.417: INFO: Pod "test-rs-7zh6g": Phase="Running", Reason="", readiness=true. Elapsed: 2.044908739s +Aug 24 12:57:40.417: INFO: Pod "test-rs-7zh6g" satisfied condition "running" +STEP: getting scale subresource 08/24/23 12:57:40.417 +STEP: updating a scale subresource 08/24/23 12:57:40.422 +STEP: verifying the replicaset Spec.Replicas was modified 08/24/23 12:57:40.436 +STEP: Patch a scale subresource 08/24/23 12:57:40.443 +[AfterEach] [sig-apps] ReplicaSet test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:42.491: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +Aug 24 12:57:40.488: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Garbage collector +[DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 -STEP: Destroying namespace "gc-7421" for this suite. 07/29/23 16:52:42.502 +STEP: Destroying namespace "replicaset-7669" for this suite. 
08/24/23 12:57:40.5 ------------------------------ -• [1.557 seconds] -[sig-api-machinery] Garbage collector -test/e2e/apimachinery/framework.go:23 - should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] - test/e2e/apimachinery/garbage_collector.go:550 +• [2.209 seconds] +[sig-apps] ReplicaSet +test/e2e/apps/framework.go:23 + Replicaset should have a working scale subresource [Conformance] + test/e2e/apps/replica_set.go:143 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Garbage collector + [BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:52:40.958 - Jul 29 16:52:40.958: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename gc 07/29/23 16:52:40.96 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:41 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:41.005 - [BeforeEach] [sig-api-machinery] Garbage collector + STEP: Creating a kubernetes client 08/24/23 12:57:38.303 + Aug 24 12:57:38.303: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replicaset 08/24/23 12:57:38.304 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:38.331 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:38.336 + [BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 - [It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] - test/e2e/apimachinery/garbage_collector.go:550 - STEP: create the deployment 07/29/23 16:52:41.01 - STEP: Wait for the Deployment to create new ReplicaSet 07/29/23 16:52:41.02 - STEP: delete the deployment 07/29/23 16:52:41.556 - STEP: wait for deployment deletion to see if the garbage collector mistakenly deletes the rs 07/29/23 16:52:41.633 - STEP: Gathering metrics 07/29/23 16:52:42.231 - Jul 29 16:52:42.285: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" - Jul 29 16:52:42.297: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 11.964687ms - Jul 29 16:52:42.297: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) - Jul 29 16:52:42.297: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" - Jul 29 16:52:42.491: INFO: For apiserver_request_total: - For apiserver_request_latency_seconds: - For apiserver_init_events_total: - For garbage_collector_attempt_to_delete_queue_latency: - For garbage_collector_attempt_to_delete_work_duration: - For garbage_collector_attempt_to_orphan_queue_latency: - For garbage_collector_attempt_to_orphan_work_duration: - For garbage_collector_dirty_processing_latency_microseconds: - For garbage_collector_event_processing_latency_microseconds: - For garbage_collector_graph_changes_queue_latency: - For garbage_collector_graph_changes_work_duration: - For garbage_collector_orphan_processing_latency_microseconds: - For namespace_queue_latency: - For namespace_queue_latency_sum: - For namespace_queue_latency_count: - For namespace_retries: - For namespace_work_duration: - For namespace_work_duration_sum: - For namespace_work_duration_count: - For function_duration_seconds: - For errors_total: - For evicted_pods_total: - - [AfterEach] [sig-api-machinery] Garbage collector + [It] Replicaset should have a working scale subresource [Conformance] + test/e2e/apps/replica_set.go:143 + STEP: Creating replica set "test-rs" that asks for more than the allowed pod quota 08/24/23 12:57:38.34 + Aug 24 12:57:38.372: INFO: Pod name sample-pod: Found 1 pods out of 1 + STEP: ensuring each pod is running 08/24/23 12:57:38.372 + Aug 24 12:57:38.372: INFO: Waiting up to 5m0s for pod "test-rs-7zh6g" in namespace "replicaset-7669" to be "running" + Aug 24 12:57:38.407: INFO: Pod "test-rs-7zh6g": Phase="Pending", Reason="", readiness=false. Elapsed: 34.614979ms + Aug 24 12:57:40.417: INFO: Pod "test-rs-7zh6g": Phase="Running", Reason="", readiness=true. Elapsed: 2.044908739s + Aug 24 12:57:40.417: INFO: Pod "test-rs-7zh6g" satisfied condition "running" + STEP: getting scale subresource 08/24/23 12:57:40.417 + STEP: updating a scale subresource 08/24/23 12:57:40.422 + STEP: verifying the replicaset Spec.Replicas was modified 08/24/23 12:57:40.436 + STEP: Patch a scale subresource 08/24/23 12:57:40.443 + [AfterEach] [sig-apps] ReplicaSet test/e2e/framework/node/init/init.go:32 - Jul 29 16:52:42.491: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + Aug 24 12:57:40.488: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Garbage collector + [DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 - STEP: Destroying namespace "gc-7421" for this suite. 07/29/23 16:52:42.502 + STEP: Destroying namespace "replicaset-7669" for this suite. 
08/24/23 12:57:40.5 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should mutate custom resource [Conformance] - test/e2e/apimachinery/webhook.go:291 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-node] Secrets + should be consumable via the environment [NodeConformance] [Conformance] + test/e2e/common/node/secrets.go:95 +[BeforeEach] [sig-node] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:42.517 -Jul 29 16:52:42.518: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 16:52:42.52 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:42.545 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:42.549 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:57:40.516 +Aug 24 12:57:40.516: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 12:57:40.518 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:40.548 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:40.551 +[BeforeEach] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 16:52:42.578 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:52:43.541 -STEP: Deploying the webhook pod 07/29/23 16:52:43.551 -STEP: Wait for the deployment to be ready 07/29/23 16:52:43.57 -Jul 29 16:52:43.587: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -STEP: Deploying the webhook service 07/29/23 16:52:45.611 -STEP: Verifying the service has paired with the endpoint 07/29/23 16:52:45.638 -Jul 29 16:52:46.639: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should mutate custom resource [Conformance] - test/e2e/apimachinery/webhook.go:291 -Jul 29 16:52:46.650: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Registering the mutating webhook for custom resource e2e-test-webhook-2175-crds.webhook.example.com via the AdmissionRegistration API 07/29/23 16:52:47.176 -STEP: Creating a custom resource that should be mutated by the webhook 07/29/23 16:52:47.209 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[It] should be consumable via the environment [NodeConformance] [Conformance] + test/e2e/common/node/secrets.go:95 +STEP: creating secret secrets-3446/secret-test-a14a0f66-f33e-4174-89aa-97ae72da6655 08/24/23 12:57:40.555 +STEP: Creating a pod to test consume secrets 08/24/23 12:57:40.561 +Aug 24 12:57:40.573: INFO: Waiting up to 5m0s for pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c" in namespace "secrets-3446" to be "Succeeded or Failed" +Aug 24 12:57:40.579: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c": Phase="Pending", Reason="", readiness=false. Elapsed: 6.064973ms +Aug 24 12:57:42.589: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.016450896s +Aug 24 12:57:44.589: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.016178413s +Aug 24 12:57:46.590: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.017191857s +STEP: Saw pod success 08/24/23 12:57:46.59 +Aug 24 12:57:46.590: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c" satisfied condition "Succeeded or Failed" +Aug 24 12:57:46.596: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c container env-test: +STEP: delete the pod 08/24/23 12:57:46.614 +Aug 24 12:57:46.643: INFO: Waiting for pod pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c to disappear +Aug 24 12:57:46.650: INFO: Pod pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c no longer exists +[AfterEach] [sig-node] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:49.939: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 12:57:46.650: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-2446" for this suite. 07/29/23 16:52:50.072 -STEP: Destroying namespace "webhook-2446-markers" for this suite. 07/29/23 16:52:50.094 +STEP: Destroying namespace "secrets-3446" for this suite. 
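This Secrets case wires one secret key into a container's environment and expects the pod to run to completion while printing its env. A sketch of the two objects involved follows; the key, value, image, and command are illustrative stand-ins, since the log records only generated names.

```go
// Sketch of the Secret-to-environment wiring this test verifies; the pod
// prints its environment once and must reach Succeeded.
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func secretEnvObjects(ns string) (*corev1.Secret, *corev1.Pod) {
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "secret-test", Namespace: ns},
		Data:       map[string][]byte{"data-1": []byte("value-1")}, // illustrative key/value
	}
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "env-consumer", Namespace: ns},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever, // run once, then Succeeded
			Containers: []corev1.Container{{
				Name:    "env-test",
				Image:   "busybox",
				Command: []string{"sh", "-c", "env"},
				Env: []corev1.EnvVar{{
					Name: "SECRET_DATA",
					ValueFrom: &corev1.EnvVarSource{
						SecretKeyRef: &corev1.SecretKeySelector{
							LocalObjectReference: corev1.LocalObjectReference{Name: "secret-test"},
							Key:                  "data-1",
						},
					},
				}},
			}},
		},
	}
	return secret, pod
}
```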
08/24/23 12:57:46.669 ------------------------------ -• [SLOW TEST] [7.603 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should mutate custom resource [Conformance] - test/e2e/apimachinery/webhook.go:291 +• [SLOW TEST] [6.175 seconds] +[sig-node] Secrets +test/e2e/common/node/framework.go:23 + should be consumable via the environment [NodeConformance] [Conformance] + test/e2e/common/node/secrets.go:95 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:52:42.517 - Jul 29 16:52:42.518: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 16:52:42.52 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:42.545 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:42.549 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:57:40.516 + Aug 24 12:57:40.516: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 12:57:40.518 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:40.548 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:40.551 + [BeforeEach] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 16:52:42.578 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:52:43.541 - STEP: Deploying the webhook pod 07/29/23 16:52:43.551 - STEP: Wait for the deployment to be ready 07/29/23 16:52:43.57 - Jul 29 16:52:43.587: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set - STEP: Deploying the webhook service 07/29/23 16:52:45.611 - STEP: Verifying the service has paired with the endpoint 07/29/23 16:52:45.638 - Jul 29 16:52:46.639: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should mutate custom resource [Conformance] - test/e2e/apimachinery/webhook.go:291 - Jul 29 16:52:46.650: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Registering the mutating webhook for custom resource e2e-test-webhook-2175-crds.webhook.example.com via the AdmissionRegistration API 07/29/23 16:52:47.176 - STEP: Creating a custom resource that should be mutated by the webhook 07/29/23 16:52:47.209 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [It] should be consumable via the environment [NodeConformance] [Conformance] + test/e2e/common/node/secrets.go:95 + STEP: creating secret secrets-3446/secret-test-a14a0f66-f33e-4174-89aa-97ae72da6655 08/24/23 12:57:40.555 + STEP: Creating a pod to test consume secrets 08/24/23 12:57:40.561 + Aug 24 12:57:40.573: INFO: Waiting up to 5m0s for pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c" in namespace "secrets-3446" to be "Succeeded or Failed" + Aug 24 12:57:40.579: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c": Phase="Pending", Reason="", readiness=false. 
Elapsed: 6.064973ms + Aug 24 12:57:42.589: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016450896s + Aug 24 12:57:44.589: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.016178413s + Aug 24 12:57:46.590: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.017191857s + STEP: Saw pod success 08/24/23 12:57:46.59 + Aug 24 12:57:46.590: INFO: Pod "pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c" satisfied condition "Succeeded or Failed" + Aug 24 12:57:46.596: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c container env-test: + STEP: delete the pod 08/24/23 12:57:46.614 + Aug 24 12:57:46.643: INFO: Waiting for pod pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c to disappear + Aug 24 12:57:46.650: INFO: Pod pod-configmaps-86e53553-0ed3-44dc-b405-094887f0218c no longer exists + [AfterEach] [sig-node] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 16:52:49.939: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 12:57:46.650: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-2446" for this suite. 07/29/23 16:52:50.072 - STEP: Destroying namespace "webhook-2446-markers" for this suite. 07/29/23 16:52:50.094 + STEP: Destroying namespace "secrets-3446" for this suite. 
08/24/23 12:57:46.669 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSS ------------------------------ -[sig-api-machinery] Watchers - should be able to start watching from a specific resource version [Conformance] - test/e2e/apimachinery/watch.go:142 -[BeforeEach] [sig-api-machinery] Watchers +[sig-storage] EmptyDir volumes + should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:127 +[BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:50.133 -Jul 29 16:52:50.133: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename watch 07/29/23 16:52:50.14 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:50.169 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:50.174 -[BeforeEach] [sig-api-machinery] Watchers +STEP: Creating a kubernetes client 08/24/23 12:57:46.702 +Aug 24 12:57:46.702: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 12:57:46.707 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:46.747 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:46.754 +[BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[It] should be able to start watching from a specific resource version [Conformance] - test/e2e/apimachinery/watch.go:142 -STEP: creating a new configmap 07/29/23 16:52:50.184 -STEP: modifying the configmap once 07/29/23 16:52:50.194 -STEP: modifying the configmap a second time 07/29/23 16:52:50.209 -STEP: deleting the configmap 07/29/23 16:52:50.234 -STEP: creating a watch on configmaps from the resource version returned by the first update 07/29/23 16:52:50.254 -STEP: Expecting to observe notifications for all changes to the configmap after the first update 07/29/23 16:52:50.255 -Jul 29 16:52:50.256: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-802 d691f24b-b943-43bb-9547-fd079d40b359 34408 0 2023-07-29 16:52:50 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2023-07-29 16:52:50 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:52:50.256: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-802 d691f24b-b943-43bb-9547-fd079d40b359 34409 0 2023-07-29 16:52:50 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2023-07-29 16:52:50 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -[AfterEach] [sig-api-machinery] Watchers +[It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:127 +STEP: Creating a pod to test emptydir 0644 on tmpfs 08/24/23 12:57:46.759 +Aug 24 12:57:46.780: INFO: Waiting up to 5m0s for pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5" in namespace "emptydir-7116" to be "Succeeded or Failed" +Aug 24 12:57:46.786: INFO: Pod 
"pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5": Phase="Pending", Reason="", readiness=false. Elapsed: 5.49543ms +Aug 24 12:57:48.797: INFO: Pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016890481s +Aug 24 12:57:50.794: INFO: Pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5": Phase="Pending", Reason="", readiness=false. Elapsed: 4.01398961s +Aug 24 12:57:52.795: INFO: Pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.014241584s +STEP: Saw pod success 08/24/23 12:57:52.795 +Aug 24 12:57:52.795: INFO: Pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5" satisfied condition "Succeeded or Failed" +Aug 24 12:57:52.804: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5 container test-container: +STEP: delete the pod 08/24/23 12:57:52.817 +Aug 24 12:57:52.842: INFO: Waiting for pod pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5 to disappear +Aug 24 12:57:52.848: INFO: Pod pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 16:52:50.256: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Watchers +Aug 24 12:57:52.849: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "watch-802" for this suite. 07/29/23 16:52:50.265 +STEP: Destroying namespace "emptydir-7116" for this suite. 
08/24/23 12:57:52.863 ------------------------------ -• [0.150 seconds] -[sig-api-machinery] Watchers -test/e2e/apimachinery/framework.go:23 - should be able to start watching from a specific resource version [Conformance] - test/e2e/apimachinery/watch.go:142 +• [SLOW TEST] [6.177 seconds] +[sig-storage] EmptyDir volumes +test/e2e/common/storage/framework.go:23 + should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:127 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Watchers + [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:52:50.133 - Jul 29 16:52:50.133: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename watch 07/29/23 16:52:50.14 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:50.169 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:50.174 - [BeforeEach] [sig-api-machinery] Watchers + STEP: Creating a kubernetes client 08/24/23 12:57:46.702 + Aug 24 12:57:46.702: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 12:57:46.707 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:46.747 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:46.754 + [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [It] should be able to start watching from a specific resource version [Conformance] - test/e2e/apimachinery/watch.go:142 - STEP: creating a new configmap 07/29/23 16:52:50.184 - STEP: modifying the configmap once 07/29/23 16:52:50.194 - STEP: modifying the configmap a second time 07/29/23 16:52:50.209 - STEP: deleting the configmap 07/29/23 16:52:50.234 - STEP: creating a watch on configmaps from the resource version returned by the first update 07/29/23 16:52:50.254 - STEP: Expecting to observe notifications for all changes to the configmap after the first update 07/29/23 16:52:50.255 - Jul 29 16:52:50.256: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-802 d691f24b-b943-43bb-9547-fd079d40b359 34408 0 2023-07-29 16:52:50 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2023-07-29 16:52:50 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:52:50.256: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-802 d691f24b-b943-43bb-9547-fd079d40b359 34409 0 2023-07-29 16:52:50 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2023-07-29 16:52:50 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} - [AfterEach] [sig-api-machinery] Watchers + [It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:127 + STEP: Creating a pod to test emptydir 0644 on tmpfs 08/24/23 12:57:46.759 + Aug 24 12:57:46.780: INFO: Waiting up to 5m0s for pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5" in namespace "emptydir-7116" to be 
"Succeeded or Failed" + Aug 24 12:57:46.786: INFO: Pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5": Phase="Pending", Reason="", readiness=false. Elapsed: 5.49543ms + Aug 24 12:57:48.797: INFO: Pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016890481s + Aug 24 12:57:50.794: INFO: Pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5": Phase="Pending", Reason="", readiness=false. Elapsed: 4.01398961s + Aug 24 12:57:52.795: INFO: Pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.014241584s + STEP: Saw pod success 08/24/23 12:57:52.795 + Aug 24 12:57:52.795: INFO: Pod "pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5" satisfied condition "Succeeded or Failed" + Aug 24 12:57:52.804: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5 container test-container: + STEP: delete the pod 08/24/23 12:57:52.817 + Aug 24 12:57:52.842: INFO: Waiting for pod pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5 to disappear + Aug 24 12:57:52.848: INFO: Pod pod-7f4f2ae2-7531-406d-9b7b-f8392fe398b5 no longer exists + [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 16:52:50.256: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Watchers + Aug 24 12:57:52.849: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "watch-802" for this suite. 07/29/23 16:52:50.265 + STEP: Destroying namespace "emptydir-7116" for this suite. 
08/24/23 12:57:52.863 << End Captured GinkgoWriter Output ------------------------------ -[sig-node] Probing container - should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:215 -[BeforeEach] [sig-node] Probing container +SSSSSSSSS +------------------------------ +[sig-node] Pods + should be submitted and removed [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:226 +[BeforeEach] [sig-node] Pods set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:52:50.283 -Jul 29 16:52:50.283: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-probe 07/29/23 16:52:50.288 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:50.319 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:50.329 -[BeforeEach] [sig-node] Probing container +STEP: Creating a kubernetes client 08/24/23 12:57:52.883 +Aug 24 12:57:52.883: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 12:57:52.885 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:52.916 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:52.921 +[BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 -[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:215 -STEP: Creating pod test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad in namespace container-probe-5707 07/29/23 16:52:50.338 -Jul 29 16:52:50.362: INFO: Waiting up to 5m0s for pod "test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad" in namespace "container-probe-5707" to be "not pending" -Jul 29 16:52:50.370: INFO: Pod "test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad": Phase="Pending", Reason="", readiness=false. Elapsed: 7.807108ms -Jul 29 16:52:52.380: INFO: Pod "test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.017851415s -Jul 29 16:52:52.380: INFO: Pod "test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad" satisfied condition "not pending" -Jul 29 16:52:52.380: INFO: Started pod test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad in namespace container-probe-5707 -STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 16:52:52.38 -Jul 29 16:52:52.386: INFO: Initial restart count of pod test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad is 0 -STEP: deleting the pod 07/29/23 16:56:53.563 -[AfterEach] [sig-node] Probing container +[BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 +[It] should be submitted and removed [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:226 +STEP: creating the pod 08/24/23 12:57:52.926 +STEP: setting up watch 08/24/23 12:57:52.927 +STEP: submitting the pod to kubernetes 08/24/23 12:57:53.035 +STEP: verifying the pod is in kubernetes 08/24/23 12:57:53.065 +STEP: verifying pod creation was observed 08/24/23 12:57:53.074 +Aug 24 12:57:53.075: INFO: Waiting up to 5m0s for pod "pod-submit-remove-77eadba3-a187-4125-9206-949783fadb8a" in namespace "pods-6577" to be "running" +Aug 24 12:57:53.098: INFO: Pod "pod-submit-remove-77eadba3-a187-4125-9206-949783fadb8a": Phase="Pending", Reason="", readiness=false. Elapsed: 23.450218ms +Aug 24 12:57:55.106: INFO: Pod "pod-submit-remove-77eadba3-a187-4125-9206-949783fadb8a": Phase="Running", Reason="", readiness=true. Elapsed: 2.031184253s +Aug 24 12:57:55.106: INFO: Pod "pod-submit-remove-77eadba3-a187-4125-9206-949783fadb8a" satisfied condition "running" +STEP: deleting the pod gracefully 08/24/23 12:57:55.114 +STEP: verifying pod deletion was observed 08/24/23 12:57:55.135 +[AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 -Jul 29 16:56:53.594: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Probing container +Aug 24 12:57:57.917: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Probing container +[DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Probing container +[DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 -STEP: Destroying namespace "container-probe-5707" for this suite. 07/29/23 16:56:53.626 +STEP: Destroying namespace "pods-6577" for this suite. 
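The Pods run above subscribes to a watch before submitting the pod, then asserts that both creation and, after a graceful delete, deletion are observed. A sketch of that watch loop with client-go follows; the label selector value stands in for the unique label the e2e code attaches to the pod.

```go
// Sketch of the watch used above: subscribe first, then require that an
// ADDED event and, after a graceful delete, a DELETED event both arrive.
package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
)

func observePodLifecycle(ctx context.Context, cs kubernetes.Interface, ns string) error {
	w, err := cs.CoreV1().Pods(ns).Watch(ctx, metav1.ListOptions{LabelSelector: "time=123"})
	if err != nil {
		return err
	}
	defer w.Stop()
	// ...submit and later delete the pod here, then drain events.
	for ev := range w.ResultChan() {
		fmt.Printf("observed %s\n", ev.Type)
		if ev.Type == watch.Deleted {
			return nil // creation and deletion were both observed
		}
	}
	return fmt.Errorf("watch closed before a DELETED event arrived")
}
```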
08/24/23 12:57:57.927 ------------------------------ -• [SLOW TEST] [243.356 seconds] -[sig-node] Probing container +• [SLOW TEST] [5.057 seconds] +[sig-node] Pods test/e2e/common/node/framework.go:23 - should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:215 + should be submitted and removed [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:226 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Probing container + [BeforeEach] [sig-node] Pods set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:52:50.283 - Jul 29 16:52:50.283: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-probe 07/29/23 16:52:50.288 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:52:50.319 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:52:50.329 - [BeforeEach] [sig-node] Probing container + STEP: Creating a kubernetes client 08/24/23 12:57:52.883 + Aug 24 12:57:52.883: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 12:57:52.885 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:52.916 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:52.921 + [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Probing container - test/e2e/common/node/container_probe.go:63 - [It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] - test/e2e/common/node/container_probe.go:215 - STEP: Creating pod test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad in namespace container-probe-5707 07/29/23 16:52:50.338 - Jul 29 16:52:50.362: INFO: Waiting up to 5m0s for pod "test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad" in namespace "container-probe-5707" to be "not pending" - Jul 29 16:52:50.370: INFO: Pod "test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad": Phase="Pending", Reason="", readiness=false. Elapsed: 7.807108ms - Jul 29 16:52:52.380: INFO: Pod "test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.017851415s - Jul 29 16:52:52.380: INFO: Pod "test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad" satisfied condition "not pending" - Jul 29 16:52:52.380: INFO: Started pod test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad in namespace container-probe-5707 - STEP: checking the pod's current state and verifying that restartCount is present 07/29/23 16:52:52.38 - Jul 29 16:52:52.386: INFO: Initial restart count of pod test-webserver-2aa92c96-ddd3-40cf-bca5-e522a851dcad is 0 - STEP: deleting the pod 07/29/23 16:56:53.563 - [AfterEach] [sig-node] Probing container + [BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 + [It] should be submitted and removed [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:226 + STEP: creating the pod 08/24/23 12:57:52.926 + STEP: setting up watch 08/24/23 12:57:52.927 + STEP: submitting the pod to kubernetes 08/24/23 12:57:53.035 + STEP: verifying the pod is in kubernetes 08/24/23 12:57:53.065 + STEP: verifying pod creation was observed 08/24/23 12:57:53.074 + Aug 24 12:57:53.075: INFO: Waiting up to 5m0s for pod "pod-submit-remove-77eadba3-a187-4125-9206-949783fadb8a" in namespace "pods-6577" to be "running" + Aug 24 12:57:53.098: INFO: Pod "pod-submit-remove-77eadba3-a187-4125-9206-949783fadb8a": Phase="Pending", Reason="", readiness=false. Elapsed: 23.450218ms + Aug 24 12:57:55.106: INFO: Pod "pod-submit-remove-77eadba3-a187-4125-9206-949783fadb8a": Phase="Running", Reason="", readiness=true. Elapsed: 2.031184253s + Aug 24 12:57:55.106: INFO: Pod "pod-submit-remove-77eadba3-a187-4125-9206-949783fadb8a" satisfied condition "running" + STEP: deleting the pod gracefully 08/24/23 12:57:55.114 + STEP: verifying pod deletion was observed 08/24/23 12:57:55.135 + [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 - Jul 29 16:56:53.594: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Probing container + Aug 24 12:57:57.917: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Probing container + [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 - STEP: Destroying namespace "container-probe-5707" for this suite. 07/29/23 16:56:53.626 + STEP: Destroying namespace "pods-6577" for this suite. 
08/24/23 12:57:57.927 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should mutate pod and apply defaults after mutation [Conformance] - test/e2e/apimachinery/webhook.go:264 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-api-machinery] ResourceQuota + should apply changes to a resourcequota status [Conformance] + test/e2e/apimachinery/resource_quota.go:1010 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:56:53.642 -Jul 29 16:56:53.642: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 16:56:53.65 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:56:53.677 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:56:53.681 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 12:57:57.958 +Aug 24 12:57:57.958: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 12:57:57.96 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:57.991 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:58.001 +[BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 16:56:53.709 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:56:55.143 -STEP: Deploying the webhook pod 07/29/23 16:56:55.16 -STEP: Wait for the deployment to be ready 07/29/23 16:56:55.181 -Jul 29 16:56:55.196: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created -STEP: Deploying the webhook service 07/29/23 16:56:57.217 -STEP: Verifying the service has paired with the endpoint 07/29/23 16:56:57.236 -Jul 29 16:56:58.237: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should mutate pod and apply defaults after mutation [Conformance] - test/e2e/apimachinery/webhook.go:264 -STEP: Registering the mutating pod webhook via the AdmissionRegistration API 07/29/23 16:56:58.245 -STEP: create a pod that should be updated by the webhook 07/29/23 16:56:58.274 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[It] should apply changes to a resourcequota status [Conformance] + test/e2e/apimachinery/resource_quota.go:1010 +STEP: Creating resourceQuota "e2e-rq-status-954w6" 08/24/23 12:57:58.014 +Aug 24 12:57:58.039: INFO: Resource quota "e2e-rq-status-954w6" reports spec: hard cpu limit of 500m +Aug 24 12:57:58.040: INFO: Resource quota "e2e-rq-status-954w6" reports spec: hard memory limit of 500Mi +STEP: Updating resourceQuota "e2e-rq-status-954w6" /status 08/24/23 12:57:58.04 +STEP: Confirm /status for "e2e-rq-status-954w6" resourceQuota via watch 08/24/23 12:57:58.056 +Aug 24 12:57:58.062: INFO: observed resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard status: v1.ResourceList(nil) +Aug 24 12:57:58.062: INFO: Found resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard 
status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:500, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:524288000, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500Mi", Format:"BinarySI"}} +Aug 24 12:57:58.063: INFO: ResourceQuota "e2e-rq-status-954w6" /status was updated +STEP: Patching hard spec values for cpu & memory 08/24/23 12:57:58.087 +Aug 24 12:57:58.111: INFO: Resource quota "e2e-rq-status-954w6" reports spec: hard cpu limit of 1 +Aug 24 12:57:58.111: INFO: Resource quota "e2e-rq-status-954w6" reports spec: hard memory limit of 1Gi +STEP: Patching "e2e-rq-status-954w6" /status 08/24/23 12:57:58.111 +STEP: Confirm /status for "e2e-rq-status-954w6" resourceQuota via watch 08/24/23 12:57:58.121 +Aug 24 12:57:58.124: INFO: observed resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:500, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:524288000, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500Mi", Format:"BinarySI"}} +Aug 24 12:57:58.124: INFO: Found resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:1, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:1073741824, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1Gi", Format:"BinarySI"}} +Aug 24 12:57:58.124: INFO: ResourceQuota "e2e-rq-status-954w6" /status was patched +STEP: Get "e2e-rq-status-954w6" /status 08/24/23 12:57:58.124 +Aug 24 12:57:58.164: INFO: Resourcequota "e2e-rq-status-954w6" reports status: hard cpu of 1 +Aug 24 12:57:58.165: INFO: Resourcequota "e2e-rq-status-954w6" reports status: hard memory of 1Gi +STEP: Repatching "e2e-rq-status-954w6" /status before checking Spec is unchanged 08/24/23 12:57:58.174 +Aug 24 12:57:58.190: INFO: Resourcequota "e2e-rq-status-954w6" reports status: hard cpu of 2 +Aug 24 12:57:58.190: INFO: Resourcequota "e2e-rq-status-954w6" reports status: hard memory of 2Gi +Aug 24 12:57:58.194: INFO: Found resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:2, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"2", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:2147483648, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"2Gi", Format:"BinarySI"}} +Aug 24 13:01:28.209: INFO: ResourceQuota "e2e-rq-status-954w6" Spec was unchanged and /status reset +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:56:58.318: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 13:01:28.209: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] 
[sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-2582" for this suite. 07/29/23 16:56:58.426 -STEP: Destroying namespace "webhook-2582-markers" for this suite. 07/29/23 16:56:58.456 +STEP: Destroying namespace "resourcequota-4515" for this suite. 08/24/23 13:01:28.22 ------------------------------ -• [4.825 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +• [SLOW TEST] [210.277 seconds] +[sig-api-machinery] ResourceQuota test/e2e/apimachinery/framework.go:23 - should mutate pod and apply defaults after mutation [Conformance] - test/e2e/apimachinery/webhook.go:264 + should apply changes to a resourcequota status [Conformance] + test/e2e/apimachinery/resource_quota.go:1010 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:56:53.642 - Jul 29 16:56:53.642: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 16:56:53.65 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:56:53.677 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:56:53.681 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 12:57:57.958 + Aug 24 12:57:57.958: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 12:57:57.96 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 12:57:57.991 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 12:57:58.001 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 16:56:53.709 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:56:55.143 - STEP: Deploying the webhook pod 07/29/23 16:56:55.16 - STEP: Wait for the deployment to be ready 07/29/23 16:56:55.181 - Jul 29 16:56:55.196: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created - STEP: Deploying the webhook service 07/29/23 16:56:57.217 - STEP: Verifying the service has paired with the endpoint 07/29/23 16:56:57.236 - Jul 29 16:56:58.237: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should mutate pod and apply defaults after mutation [Conformance] - test/e2e/apimachinery/webhook.go:264 - STEP: Registering the mutating pod webhook via the AdmissionRegistration API 07/29/23 16:56:58.245 - STEP: create a pod that should be updated by the webhook 07/29/23 16:56:58.274 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [It] should apply changes to a resourcequota status [Conformance] + test/e2e/apimachinery/resource_quota.go:1010 + STEP: Creating resourceQuota "e2e-rq-status-954w6" 08/24/23 12:57:58.014 + Aug 24 12:57:58.039: INFO: Resource quota "e2e-rq-status-954w6" reports spec: hard cpu limit of 500m + Aug 24 12:57:58.040: INFO: 
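The ResourceQuota run drives the /status subresource directly: an UpdateStatus writes hard limits, patches then raise them, and the test finally asserts that Spec was never touched. A hedged sketch of those calls with client-go, using the hard values from the log; the merge-patch encoding is an assumption (the e2e source may use a different patch type).

```go
// Sketch of the /status traffic in this run: status is written and
// patched while Spec stays unchanged (the property asserted at the end).
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

func touchQuotaStatus(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	rq, err := cs.CoreV1().ResourceQuotas(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Write hard limits straight to the status subresource.
	rq.Status.Hard = corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("500Mi"),
	}
	if _, err := cs.CoreV1().ResourceQuotas(ns).UpdateStatus(ctx, rq, metav1.UpdateOptions{}); err != nil {
		return err
	}
	// Patch the same subresource; note the trailing "status" argument.
	patch := []byte(`{"status":{"hard":{"cpu":"1","memory":"1Gi"}}}`)
	_, err = cs.CoreV1().ResourceQuotas(ns).Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{}, "status")
	return err
}
```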
Resource quota "e2e-rq-status-954w6" reports spec: hard memory limit of 500Mi + STEP: Updating resourceQuota "e2e-rq-status-954w6" /status 08/24/23 12:57:58.04 + STEP: Confirm /status for "e2e-rq-status-954w6" resourceQuota via watch 08/24/23 12:57:58.056 + Aug 24 12:57:58.062: INFO: observed resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard status: v1.ResourceList(nil) + Aug 24 12:57:58.062: INFO: Found resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:500, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:524288000, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500Mi", Format:"BinarySI"}} + Aug 24 12:57:58.063: INFO: ResourceQuota "e2e-rq-status-954w6" /status was updated + STEP: Patching hard spec values for cpu & memory 08/24/23 12:57:58.087 + Aug 24 12:57:58.111: INFO: Resource quota "e2e-rq-status-954w6" reports spec: hard cpu limit of 1 + Aug 24 12:57:58.111: INFO: Resource quota "e2e-rq-status-954w6" reports spec: hard memory limit of 1Gi + STEP: Patching "e2e-rq-status-954w6" /status 08/24/23 12:57:58.111 + STEP: Confirm /status for "e2e-rq-status-954w6" resourceQuota via watch 08/24/23 12:57:58.121 + Aug 24 12:57:58.124: INFO: observed resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:500, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:524288000, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"500Mi", Format:"BinarySI"}} + Aug 24 12:57:58.124: INFO: Found resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:1, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:1073741824, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"1Gi", Format:"BinarySI"}} + Aug 24 12:57:58.124: INFO: ResourceQuota "e2e-rq-status-954w6" /status was patched + STEP: Get "e2e-rq-status-954w6" /status 08/24/23 12:57:58.124 + Aug 24 12:57:58.164: INFO: Resourcequota "e2e-rq-status-954w6" reports status: hard cpu of 1 + Aug 24 12:57:58.165: INFO: Resourcequota "e2e-rq-status-954w6" reports status: hard memory of 1Gi + STEP: Repatching "e2e-rq-status-954w6" /status before checking Spec is unchanged 08/24/23 12:57:58.174 + Aug 24 12:57:58.190: INFO: Resourcequota "e2e-rq-status-954w6" reports status: hard cpu of 2 + Aug 24 12:57:58.190: INFO: Resourcequota "e2e-rq-status-954w6" reports status: hard memory of 2Gi + Aug 24 12:57:58.194: INFO: Found resourceQuota "e2e-rq-status-954w6" in namespace "resourcequota-4515" with hard status: v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:2, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"2", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:2147483648, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"2Gi", Format:"BinarySI"}} + Aug 24 13:01:28.209: INFO: ResourceQuota "e2e-rq-status-954w6" Spec was unchanged and /status reset + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:56:58.318: INFO: Waiting up to 
3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 13:01:28.209: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-2582" for this suite. 07/29/23 16:56:58.426 - STEP: Destroying namespace "webhook-2582-markers" for this suite. 07/29/23 16:56:58.456 + STEP: Destroying namespace "resourcequota-4515" for this suite. 08/24/23 13:01:28.22 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +S ------------------------------ [sig-storage] Downward API volume - should provide podname only [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:53 + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:84 [BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:56:58.473 -Jul 29 16:56:58.473: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 16:56:58.48 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:56:58.503 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:56:58.514 +STEP: Creating a kubernetes client 08/24/23 13:01:28.235 +Aug 24 13:01:28.235: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 13:01:28.238 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:28.28 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:28.286 [BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-storage] Downward API volume test/e2e/common/storage/downwardapi_volume.go:44 -[It] should provide podname only [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:53 -STEP: Creating a pod to test downward API volume plugin 07/29/23 16:56:58.528 -Jul 29 16:56:58.560: INFO: Waiting up to 5m0s for pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d" in namespace "downward-api-1842" to be "Succeeded or Failed" -Jul 29 16:56:58.573: INFO: Pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d": Phase="Pending", Reason="", readiness=false. Elapsed: 13.508246ms -Jul 29 16:57:00.582: INFO: Pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022771284s -Jul 29 16:57:02.581: INFO: Pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.021456496s -STEP: Saw pod success 07/29/23 16:57:02.581 -Jul 29 16:57:02.582: INFO: Pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d" satisfied condition "Succeeded or Failed" -Jul 29 16:57:02.586: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d container client-container: -STEP: delete the pod 07/29/23 16:57:02.622 -Jul 29 16:57:02.646: INFO: Waiting for pod downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d to disappear -Jul 29 16:57:02.650: INFO: Pod downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d no longer exists +[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:84 +STEP: Creating a pod to test downward API volume plugin 08/24/23 13:01:28.292 +Aug 24 13:01:28.309: INFO: Waiting up to 5m0s for pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d" in namespace "downward-api-5279" to be "Succeeded or Failed" +Aug 24 13:01:28.319: INFO: Pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d": Phase="Pending", Reason="", readiness=false. Elapsed: 9.80886ms +Aug 24 13:01:30.334: INFO: Pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02477605s +Aug 24 13:01:32.328: INFO: Pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01866966s +STEP: Saw pod success 08/24/23 13:01:32.328 +Aug 24 13:01:32.328: INFO: Pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d" satisfied condition "Succeeded or Failed" +Aug 24 13:01:32.336: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d container client-container: +STEP: delete the pod 08/24/23 13:01:32.37 +Aug 24 13:01:32.396: INFO: Waiting for pod downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d to disappear +Aug 24 13:01:32.402: INFO: Pod downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d no longer exists [AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 -Jul 29 16:57:02.650: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 13:01:32.403: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-1842" for this suite. 07/29/23 16:57:02.659 +STEP: Destroying namespace "downward-api-5279" for this suite. 
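The Downward API case projects the pod's own name into a volume file with an explicit per-item mode, then has the container report that mode. A sketch of the volume definition follows; the pod name, image, and command are illustrative.

```go
// Sketch of the downward API volume behind "should set mode on item
// file": metadata.name is projected to a file created with mode 0400.
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func downwardAPIModePod(ns string) *corev1.Pod {
	mode := int32(0400) // the per-item mode under test
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "downwardapi-volume-mode", Namespace: ns},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			Volumes: []corev1.Volume{{
				Name: "podinfo",
				VolumeSource: corev1.VolumeSource{
					DownwardAPI: &corev1.DownwardAPIVolumeSource{
						Items: []corev1.DownwardAPIVolumeFile{{
							Path:     "podname",
							FieldRef: &corev1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
							Mode:     &mode,
						}},
					},
				},
			}},
			Containers: []corev1.Container{{
				Name:         "client-container",
				Image:        "busybox",
				Command:      []string{"sh", "-c", "stat -c %a /etc/podinfo/podname"},
				VolumeMounts: []corev1.VolumeMount{{Name: "podinfo", MountPath: "/etc/podinfo"}},
			}},
		},
	}
}
```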
08/24/23 13:01:32.414 ------------------------------ -• [4.198 seconds] +• [4.194 seconds] [sig-storage] Downward API volume test/e2e/common/storage/framework.go:23 - should provide podname only [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:53 + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:84 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:56:58.473 - Jul 29 16:56:58.473: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 16:56:58.48 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:56:58.503 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:56:58.514 + STEP: Creating a kubernetes client 08/24/23 13:01:28.235 + Aug 24 13:01:28.235: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 13:01:28.238 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:28.28 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:28.286 [BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-storage] Downward API volume test/e2e/common/storage/downwardapi_volume.go:44 - [It] should provide podname only [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:53 - STEP: Creating a pod to test downward API volume plugin 07/29/23 16:56:58.528 - Jul 29 16:56:58.560: INFO: Waiting up to 5m0s for pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d" in namespace "downward-api-1842" to be "Succeeded or Failed" - Jul 29 16:56:58.573: INFO: Pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d": Phase="Pending", Reason="", readiness=false. Elapsed: 13.508246ms - Jul 29 16:57:00.582: INFO: Pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022771284s - Jul 29 16:57:02.581: INFO: Pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021456496s - STEP: Saw pod success 07/29/23 16:57:02.581 - Jul 29 16:57:02.582: INFO: Pod "downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d" satisfied condition "Succeeded or Failed" - Jul 29 16:57:02.586: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d container client-container: - STEP: delete the pod 07/29/23 16:57:02.622 - Jul 29 16:57:02.646: INFO: Waiting for pod downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d to disappear - Jul 29 16:57:02.650: INFO: Pod downwardapi-volume-3aa8b159-0327-4c51-ad56-4a1c0fa3338d no longer exists + [It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:84 + STEP: Creating a pod to test downward API volume plugin 08/24/23 13:01:28.292 + Aug 24 13:01:28.309: INFO: Waiting up to 5m0s for pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d" in namespace "downward-api-5279" to be "Succeeded or Failed" + Aug 24 13:01:28.319: INFO: Pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 9.80886ms + Aug 24 13:01:30.334: INFO: Pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02477605s + Aug 24 13:01:32.328: INFO: Pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01866966s + STEP: Saw pod success 08/24/23 13:01:32.328 + Aug 24 13:01:32.328: INFO: Pod "downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d" satisfied condition "Succeeded or Failed" + Aug 24 13:01:32.336: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d container client-container: + STEP: delete the pod 08/24/23 13:01:32.37 + Aug 24 13:01:32.396: INFO: Waiting for pod downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d to disappear + Aug 24 13:01:32.402: INFO: Pod downwardapi-volume-739aa9b7-9641-469d-9528-908edb339b0d no longer exists [AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 - Jul 29 16:57:02.650: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 13:01:32.403: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-1842" for this suite. 07/29/23 16:57:02.659 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSSSSSSSSSSSS ------------------------------- -[sig-network] Networking Granular Checks: Pods - should function for intra-pod communication: http [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:82 -[BeforeEach] [sig-network] Networking - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:57:02.673 -Jul 29 16:57:02.674: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pod-network-test 07/29/23 16:57:02.676 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:02.707 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:02.713 -[BeforeEach] [sig-network] Networking - test/e2e/framework/metrics/init/init.go:31 -[It] should function for intra-pod communication: http [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:82 -STEP: Performing setup for networking test in namespace pod-network-test-7107 07/29/23 16:57:02.718 -STEP: creating a selector 07/29/23 16:57:02.719 -STEP: Creating the service pods in kubernetes 07/29/23 16:57:02.719 -Jul 29 16:57:02.719: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable -Jul 29 16:57:02.816: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-7107" to be "running and ready" -Jul 29 16:57:02.874: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 57.735561ms -Jul 29 16:57:02.874: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:57:04.886: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.069501849s -Jul 29 16:57:04.886: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:57:06.915: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 4.098258329s -Jul 29 16:57:06.915: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:57:08.883: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.067120235s -Jul 29 16:57:08.884: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:57:10.890: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.073504692s -Jul 29 16:57:10.890: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:57:12.883: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.066515635s -Jul 29 16:57:12.883: INFO: The phase of Pod netserver-0 is Running (Ready = false) -Jul 29 16:57:14.884: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 12.067418477s -Jul 29 16:57:14.884: INFO: The phase of Pod netserver-0 is Running (Ready = true) -Jul 29 16:57:14.884: INFO: Pod "netserver-0" satisfied condition "running and ready" -Jul 29 16:57:14.891: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-7107" to be "running and ready" -Jul 29 16:57:14.918: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 27.439024ms -Jul 29 16:57:14.918: INFO: The phase of Pod netserver-1 is Running (Ready = true) -Jul 29 16:57:14.918: INFO: Pod "netserver-1" satisfied condition "running and ready" -Jul 29 16:57:14.925: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-7107" to be "running and ready" -Jul 29 16:57:14.933: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 7.462103ms -Jul 29 16:57:14.933: INFO: The phase of Pod netserver-2 is Running (Ready = true) -Jul 29 16:57:14.933: INFO: Pod "netserver-2" satisfied condition "running and ready" -STEP: Creating test pods 07/29/23 16:57:14.94 -Jul 29 16:57:14.962: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-7107" to be "running" -Jul 29 16:57:14.970: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 7.933051ms -Jul 29 16:57:16.982: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.019513881s -Jul 29 16:57:16.982: INFO: Pod "test-container-pod" satisfied condition "running" -Jul 29 16:57:16.988: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 -Jul 29 16:57:16.988: INFO: Breadth first check of 10.233.64.1 on host 192.168.121.120... 
-Jul 29 16:57:16.997: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.93:9080/dial?request=hostname&protocol=http&host=10.233.64.1&port=8083&tries=1'] Namespace:pod-network-test-7107 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:57:16.997: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:57:16.999: INFO: ExecWithOptions: Clientset creation -Jul 29 16:57:16.999: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7107/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.93%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.64.1%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) -Jul 29 16:57:17.160: INFO: Waiting for responses: map[] -Jul 29 16:57:17.160: INFO: reached 10.233.64.1 after 0/1 tries -Jul 29 16:57:17.160: INFO: Breadth first check of 10.233.65.118 on host 192.168.121.211... -Jul 29 16:57:17.170: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.93:9080/dial?request=hostname&protocol=http&host=10.233.65.118&port=8083&tries=1'] Namespace:pod-network-test-7107 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:57:17.170: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:57:17.172: INFO: ExecWithOptions: Clientset creation -Jul 29 16:57:17.172: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7107/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.93%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.65.118%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) -Jul 29 16:57:17.333: INFO: Waiting for responses: map[] -Jul 29 16:57:17.333: INFO: reached 10.233.65.118 after 0/1 tries -Jul 29 16:57:17.333: INFO: Breadth first check of 10.233.66.74 on host 192.168.121.141... -Jul 29 16:57:17.340: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.93:9080/dial?request=hostname&protocol=http&host=10.233.66.74&port=8083&tries=1'] Namespace:pod-network-test-7107 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 16:57:17.340: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 16:57:17.343: INFO: ExecWithOptions: Clientset creation -Jul 29 16:57:17.343: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7107/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.93%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.66.74%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) -Jul 29 16:57:17.465: INFO: Waiting for responses: map[] -Jul 29 16:57:17.465: INFO: reached 10.233.66.74 after 0/1 tries -Jul 29 16:57:17.465: INFO: Going to retry 0 out of 3 pods.... 
-[AfterEach] [sig-network] Networking - test/e2e/framework/node/init/init.go:32 -Jul 29 16:57:17.466: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Networking - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Networking - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Networking - tear down framework | framework.go:193 -STEP: Destroying namespace "pod-network-test-7107" for this suite. 07/29/23 16:57:17.478 ------------------------------- -• [SLOW TEST] [14.820 seconds] -[sig-network] Networking -test/e2e/common/network/framework.go:23 - Granular Checks: Pods - test/e2e/common/network/networking.go:32 - should function for intra-pod communication: http [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:82 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Networking - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:57:02.673 - Jul 29 16:57:02.674: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pod-network-test 07/29/23 16:57:02.676 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:02.707 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:02.713 - [BeforeEach] [sig-network] Networking - test/e2e/framework/metrics/init/init.go:31 - [It] should function for intra-pod communication: http [NodeConformance] [Conformance] - test/e2e/common/network/networking.go:82 - STEP: Performing setup for networking test in namespace pod-network-test-7107 07/29/23 16:57:02.718 - STEP: creating a selector 07/29/23 16:57:02.719 - STEP: Creating the service pods in kubernetes 07/29/23 16:57:02.719 - Jul 29 16:57:02.719: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable - Jul 29 16:57:02.816: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-7107" to be "running and ready" - Jul 29 16:57:02.874: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 57.735561ms - Jul 29 16:57:02.874: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:57:04.886: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.069501849s - Jul 29 16:57:04.886: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:57:06.915: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.098258329s - Jul 29 16:57:06.915: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:57:08.883: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.067120235s - Jul 29 16:57:08.884: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:57:10.890: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.073504692s - Jul 29 16:57:10.890: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:57:12.883: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.066515635s - Jul 29 16:57:12.883: INFO: The phase of Pod netserver-0 is Running (Ready = false) - Jul 29 16:57:14.884: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. 
Elapsed: 12.067418477s - Jul 29 16:57:14.884: INFO: The phase of Pod netserver-0 is Running (Ready = true) - Jul 29 16:57:14.884: INFO: Pod "netserver-0" satisfied condition "running and ready" - Jul 29 16:57:14.891: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-7107" to be "running and ready" - Jul 29 16:57:14.918: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 27.439024ms - Jul 29 16:57:14.918: INFO: The phase of Pod netserver-1 is Running (Ready = true) - Jul 29 16:57:14.918: INFO: Pod "netserver-1" satisfied condition "running and ready" - Jul 29 16:57:14.925: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-7107" to be "running and ready" - Jul 29 16:57:14.933: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 7.462103ms - Jul 29 16:57:14.933: INFO: The phase of Pod netserver-2 is Running (Ready = true) - Jul 29 16:57:14.933: INFO: Pod "netserver-2" satisfied condition "running and ready" - STEP: Creating test pods 07/29/23 16:57:14.94 - Jul 29 16:57:14.962: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-7107" to be "running" - Jul 29 16:57:14.970: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 7.933051ms - Jul 29 16:57:16.982: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.019513881s - Jul 29 16:57:16.982: INFO: Pod "test-container-pod" satisfied condition "running" - Jul 29 16:57:16.988: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 - Jul 29 16:57:16.988: INFO: Breadth first check of 10.233.64.1 on host 192.168.121.120... - Jul 29 16:57:16.997: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.93:9080/dial?request=hostname&protocol=http&host=10.233.64.1&port=8083&tries=1'] Namespace:pod-network-test-7107 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:57:16.997: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:57:16.999: INFO: ExecWithOptions: Clientset creation - Jul 29 16:57:16.999: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7107/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.93%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.64.1%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) - Jul 29 16:57:17.160: INFO: Waiting for responses: map[] - Jul 29 16:57:17.160: INFO: reached 10.233.64.1 after 0/1 tries - Jul 29 16:57:17.160: INFO: Breadth first check of 10.233.65.118 on host 192.168.121.211... 
- Jul 29 16:57:17.170: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.93:9080/dial?request=hostname&protocol=http&host=10.233.65.118&port=8083&tries=1'] Namespace:pod-network-test-7107 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:57:17.170: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:57:17.172: INFO: ExecWithOptions: Clientset creation - Jul 29 16:57:17.172: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7107/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.93%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.65.118%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) - Jul 29 16:57:17.333: INFO: Waiting for responses: map[] - Jul 29 16:57:17.333: INFO: reached 10.233.65.118 after 0/1 tries - Jul 29 16:57:17.333: INFO: Breadth first check of 10.233.66.74 on host 192.168.121.141... - Jul 29 16:57:17.340: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.93:9080/dial?request=hostname&protocol=http&host=10.233.66.74&port=8083&tries=1'] Namespace:pod-network-test-7107 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 16:57:17.340: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 16:57:17.343: INFO: ExecWithOptions: Clientset creation - Jul 29 16:57:17.343: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-7107/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.93%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dhttp%26host%3D10.233.66.74%26port%3D8083%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) - Jul 29 16:57:17.465: INFO: Waiting for responses: map[] - Jul 29 16:57:17.465: INFO: reached 10.233.66.74 after 0/1 tries - Jul 29 16:57:17.465: INFO: Going to retry 0 out of 3 pods.... - [AfterEach] [sig-network] Networking - test/e2e/framework/node/init/init.go:32 - Jul 29 16:57:17.466: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Networking - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Networking - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Networking - tear down framework | framework.go:193 - STEP: Destroying namespace "pod-network-test-7107" for this suite. 07/29/23 16:57:17.478 + STEP: Destroying namespace "downward-api-5279" for this suite. 
08/24/23 13:01:32.414 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Variable Expansion - should allow substituting values in a container's command [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:73 -[BeforeEach] [sig-node] Variable Expansion +[sig-node] Containers + should use the image defaults if command and args are blank [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:39 +[BeforeEach] [sig-node] Containers set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:57:17.498 -Jul 29 16:57:17.499: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename var-expansion 07/29/23 16:57:17.5 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:17.531 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:17.539 -[BeforeEach] [sig-node] Variable Expansion +STEP: Creating a kubernetes client 08/24/23 13:01:32.433 +Aug 24 13:01:32.433: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename containers 08/24/23 13:01:32.436 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:32.468 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:32.473 +[BeforeEach] [sig-node] Containers test/e2e/framework/metrics/init/init.go:31 -[It] should allow substituting values in a container's command [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:73 -STEP: Creating a pod to test substitution in container's command 07/29/23 16:57:17.545 -Jul 29 16:57:17.565: INFO: Waiting up to 5m0s for pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b" in namespace "var-expansion-7721" to be "Succeeded or Failed" -Jul 29 16:57:17.575: INFO: Pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b": Phase="Pending", Reason="", readiness=false. Elapsed: 9.419443ms -Jul 29 16:57:19.584: INFO: Pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018831501s -Jul 29 16:57:21.584: INFO: Pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018837814s -STEP: Saw pod success 07/29/23 16:57:21.584 -Jul 29 16:57:21.585: INFO: Pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b" satisfied condition "Succeeded or Failed" -Jul 29 16:57:21.590: INFO: Trying to get logs from node wetuj3nuajog-3 pod var-expansion-d6230f02-4219-497d-8986-5e81c722a49b container dapi-container: -STEP: delete the pod 07/29/23 16:57:21.61 -Jul 29 16:57:21.658: INFO: Waiting for pod var-expansion-d6230f02-4219-497d-8986-5e81c722a49b to disappear -Jul 29 16:57:21.667: INFO: Pod var-expansion-d6230f02-4219-497d-8986-5e81c722a49b no longer exists -[AfterEach] [sig-node] Variable Expansion +[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:39 +Aug 24 13:01:32.493: INFO: Waiting up to 5m0s for pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7" in namespace "containers-329" to be "running" +Aug 24 13:01:32.502: INFO: Pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.007743ms +Aug 24 13:01:34.513: INFO: Pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019759729s +Aug 24 13:01:36.509: INFO: Pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7": Phase="Running", Reason="", readiness=true. Elapsed: 4.015470762s +Aug 24 13:01:36.509: INFO: Pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7" satisfied condition "running" +[AfterEach] [sig-node] Containers test/e2e/framework/node/init/init.go:32 -Jul 29 16:57:21.667: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Variable Expansion +Aug 24 13:01:36.525: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Containers test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-node] Containers dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-node] Containers tear down framework | framework.go:193 -STEP: Destroying namespace "var-expansion-7721" for this suite. 07/29/23 16:57:21.679 +STEP: Destroying namespace "containers-329" for this suite. 08/24/23 13:01:36.534 ------------------------------ -• [4.193 seconds] -[sig-node] Variable Expansion +• [4.117 seconds] +[sig-node] Containers test/e2e/common/node/framework.go:23 - should allow substituting values in a container's command [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:73 + should use the image defaults if command and args are blank [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:39 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Variable Expansion + [BeforeEach] [sig-node] Containers set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:57:17.498 - Jul 29 16:57:17.499: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename var-expansion 07/29/23 16:57:17.5 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:17.531 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:17.539 - [BeforeEach] [sig-node] Variable Expansion + STEP: Creating a kubernetes client 08/24/23 13:01:32.433 + Aug 24 13:01:32.433: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename containers 08/24/23 13:01:32.436 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:32.468 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:32.473 + [BeforeEach] [sig-node] Containers test/e2e/framework/metrics/init/init.go:31 - [It] should allow substituting values in a container's command [NodeConformance] [Conformance] - test/e2e/common/node/expansion.go:73 - STEP: Creating a pod to test substitution in container's command 07/29/23 16:57:17.545 - Jul 29 16:57:17.565: INFO: Waiting up to 5m0s for pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b" in namespace "var-expansion-7721" to be "Succeeded or Failed" - Jul 29 16:57:17.575: INFO: Pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b": Phase="Pending", Reason="", readiness=false. Elapsed: 9.419443ms - Jul 29 16:57:19.584: INFO: Pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.018831501s - Jul 29 16:57:21.584: INFO: Pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018837814s - STEP: Saw pod success 07/29/23 16:57:21.584 - Jul 29 16:57:21.585: INFO: Pod "var-expansion-d6230f02-4219-497d-8986-5e81c722a49b" satisfied condition "Succeeded or Failed" - Jul 29 16:57:21.590: INFO: Trying to get logs from node wetuj3nuajog-3 pod var-expansion-d6230f02-4219-497d-8986-5e81c722a49b container dapi-container: - STEP: delete the pod 07/29/23 16:57:21.61 - Jul 29 16:57:21.658: INFO: Waiting for pod var-expansion-d6230f02-4219-497d-8986-5e81c722a49b to disappear - Jul 29 16:57:21.667: INFO: Pod var-expansion-d6230f02-4219-497d-8986-5e81c722a49b no longer exists - [AfterEach] [sig-node] Variable Expansion + [It] should use the image defaults if command and args are blank [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:39 + Aug 24 13:01:32.493: INFO: Waiting up to 5m0s for pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7" in namespace "containers-329" to be "running" + Aug 24 13:01:32.502: INFO: Pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7": Phase="Pending", Reason="", readiness=false. Elapsed: 8.007743ms + Aug 24 13:01:34.513: INFO: Pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019759729s + Aug 24 13:01:36.509: INFO: Pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7": Phase="Running", Reason="", readiness=true. Elapsed: 4.015470762s + Aug 24 13:01:36.509: INFO: Pod "client-containers-016a2448-e31a-4647-97b4-3276483d50f7" satisfied condition "running" + [AfterEach] [sig-node] Containers test/e2e/framework/node/init/init.go:32 - Jul 29 16:57:21.667: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Variable Expansion + Aug 24 13:01:36.525: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Containers test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-node] Containers dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-node] Containers tear down framework | framework.go:193 - STEP: Destroying namespace "var-expansion-7721" for this suite. 07/29/23 16:57:21.679 + STEP: Destroying namespace "containers-329" for this suite. 
08/24/23 13:01:36.534 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition - creating/deleting custom resource definition objects works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:58 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[sig-network] Services + should delete a collection of services [Conformance] + test/e2e/network/service.go:3654 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:57:21.696 -Jul 29 16:57:21.696: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 16:57:21.699 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:21.741 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:21.747 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 13:01:36.568 +Aug 24 13:01:36.569: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 13:01:36.57 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:36.609 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:36.616 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[It] creating/deleting custom resource definition objects works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:58 -Jul 29 16:57:21.752: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should delete a collection of services [Conformance] + test/e2e/network/service.go:3654 +STEP: creating a collection of services 08/24/23 13:01:36.626 +Aug 24 13:01:36.626: INFO: Creating e2e-svc-a-sdwwd +Aug 24 13:01:36.653: INFO: Creating e2e-svc-b-pkxnx +Aug 24 13:01:36.680: INFO: Creating e2e-svc-c-wknbr +STEP: deleting service collection 08/24/23 13:01:36.709 +Aug 24 13:01:36.789: INFO: Collection of services has been deleted +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 16:57:22.808: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +Aug 24 13:01:36.789: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "custom-resource-definition-533" for this suite. 07/29/23 16:57:22.82 +STEP: Destroying namespace "services-4410" for this suite. 
08/24/23 13:01:36.801 ------------------------------ -• [1.137 seconds] -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - Simple CustomResourceDefinition - test/e2e/apimachinery/custom_resource_definition.go:50 - creating/deleting custom resource definition objects works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:58 +• [0.252 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should delete a collection of services [Conformance] + test/e2e/network/service.go:3654 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:57:21.696 - Jul 29 16:57:21.696: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 16:57:21.699 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:21.741 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:21.747 - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 13:01:36.568 + Aug 24 13:01:36.569: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 13:01:36.57 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:36.609 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:36.616 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [It] creating/deleting custom resource definition objects works [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:58 - Jul 29 16:57:21.752: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should delete a collection of services [Conformance] + test/e2e/network/service.go:3654 + STEP: creating a collection of services 08/24/23 13:01:36.626 + Aug 24 13:01:36.626: INFO: Creating e2e-svc-a-sdwwd + Aug 24 13:01:36.653: INFO: Creating e2e-svc-b-pkxnx + Aug 24 13:01:36.680: INFO: Creating e2e-svc-c-wknbr + STEP: deleting service collection 08/24/23 13:01:36.709 + Aug 24 13:01:36.789: INFO: Collection of services has been deleted + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 16:57:22.808: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + Aug 24 13:01:36.789: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "custom-resource-definition-533" for this suite. 
07/29/23 16:57:22.82 + STEP: Destroying namespace "services-4410" for this suite. 08/24/23 13:01:36.801 << End Captured GinkgoWriter Output ------------------------------ SS ------------------------------ -[sig-storage] ConfigMap - should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:99 -[BeforeEach] [sig-storage] ConfigMap +[sig-apps] ReplicationController + should release no longer matching pods [Conformance] + test/e2e/apps/rc.go:101 +[BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:57:22.834 -Jul 29 16:57:22.836: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 16:57:22.84 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:22.887 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:22.893 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 13:01:36.822 +Aug 24 13:01:36.823: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replication-controller 08/24/23 13:01:36.825 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:36.854 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:36.86 +[BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:99 -STEP: Creating configMap with name configmap-test-volume-map-6fce3982-86e8-4d07-9994-93627478228d 07/29/23 16:57:22.898 -STEP: Creating a pod to test consume configMaps 07/29/23 16:57:22.908 -Jul 29 16:57:22.963: INFO: Waiting up to 5m0s for pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309" in namespace "configmap-5070" to be "Succeeded or Failed" -Jul 29 16:57:22.973: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309": Phase="Pending", Reason="", readiness=false. Elapsed: 9.329108ms -Jul 29 16:57:24.984: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309": Phase="Running", Reason="", readiness=true. Elapsed: 2.020329449s -Jul 29 16:57:26.981: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309": Phase="Running", Reason="", readiness=false. Elapsed: 4.017776849s -Jul 29 16:57:28.981: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.01689023s -STEP: Saw pod success 07/29/23 16:57:28.981 -Jul 29 16:57:28.981: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309" satisfied condition "Succeeded or Failed" -Jul 29 16:57:28.986: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309 container agnhost-container: -STEP: delete the pod 07/29/23 16:57:28.996 -Jul 29 16:57:29.021: INFO: Waiting for pod pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309 to disappear -Jul 29 16:57:29.027: INFO: Pod pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309 no longer exists -[AfterEach] [sig-storage] ConfigMap +[BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 +[It] should release no longer matching pods [Conformance] + test/e2e/apps/rc.go:101 +STEP: Given a ReplicationController is created 08/24/23 13:01:36.868 +STEP: When the matched label of one of its pods change 08/24/23 13:01:36.902 +Aug 24 13:01:36.911: INFO: Pod name pod-release: Found 0 pods out of 1 +Aug 24 13:01:41.920: INFO: Pod name pod-release: Found 1 pods out of 1 +STEP: Then the pod is released 08/24/23 13:01:41.942 +[AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 -Jul 29 16:57:29.028: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 13:01:42.981: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-5070" for this suite. 07/29/23 16:57:29.038 +STEP: Destroying namespace "replication-controller-6712" for this suite. 
08/24/23 13:01:43.003 ------------------------------ -• [SLOW TEST] [6.214 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:99 +• [SLOW TEST] [6.201 seconds] +[sig-apps] ReplicationController +test/e2e/apps/framework.go:23 + should release no longer matching pods [Conformance] + test/e2e/apps/rc.go:101 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:57:22.834 - Jul 29 16:57:22.836: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 16:57:22.84 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:22.887 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:22.893 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 13:01:36.822 + Aug 24 13:01:36.823: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replication-controller 08/24/23 13:01:36.825 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:36.854 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:36.86 + [BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:99 - STEP: Creating configMap with name configmap-test-volume-map-6fce3982-86e8-4d07-9994-93627478228d 07/29/23 16:57:22.898 - STEP: Creating a pod to test consume configMaps 07/29/23 16:57:22.908 - Jul 29 16:57:22.963: INFO: Waiting up to 5m0s for pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309" in namespace "configmap-5070" to be "Succeeded or Failed" - Jul 29 16:57:22.973: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309": Phase="Pending", Reason="", readiness=false. Elapsed: 9.329108ms - Jul 29 16:57:24.984: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309": Phase="Running", Reason="", readiness=true. Elapsed: 2.020329449s - Jul 29 16:57:26.981: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309": Phase="Running", Reason="", readiness=false. Elapsed: 4.017776849s - Jul 29 16:57:28.981: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.01689023s - STEP: Saw pod success 07/29/23 16:57:28.981 - Jul 29 16:57:28.981: INFO: Pod "pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309" satisfied condition "Succeeded or Failed" - Jul 29 16:57:28.986: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309 container agnhost-container: - STEP: delete the pod 07/29/23 16:57:28.996 - Jul 29 16:57:29.021: INFO: Waiting for pod pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309 to disappear - Jul 29 16:57:29.027: INFO: Pod pod-configmaps-3ea30942-ddb6-4be8-b5dc-7c82a0d87309 no longer exists - [AfterEach] [sig-storage] ConfigMap + [BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 + [It] should release no longer matching pods [Conformance] + test/e2e/apps/rc.go:101 + STEP: Given a ReplicationController is created 08/24/23 13:01:36.868 + STEP: When the matched label of one of its pods change 08/24/23 13:01:36.902 + Aug 24 13:01:36.911: INFO: Pod name pod-release: Found 0 pods out of 1 + Aug 24 13:01:41.920: INFO: Pod name pod-release: Found 1 pods out of 1 + STEP: Then the pod is released 08/24/23 13:01:41.942 + [AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 - Jul 29 16:57:29.028: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 13:01:42.981: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-5070" for this suite. 07/29/23 16:57:29.038 + STEP: Destroying namespace "replication-controller-6712" for this suite. 08/24/23 13:01:43.003 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS +S ------------------------------ -[sig-api-machinery] Watchers - should observe add, update, and delete watch notifications on configmaps [Conformance] - test/e2e/apimachinery/watch.go:60 -[BeforeEach] [sig-api-machinery] Watchers +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a pod. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:230 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:57:29.053 -Jul 29 16:57:29.054: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename watch 07/29/23 16:57:29.056 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:29.086 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:29.092 -[BeforeEach] [sig-api-machinery] Watchers +STEP: Creating a kubernetes client 08/24/23 13:01:43.024 +Aug 24 13:01:43.024: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 13:01:43.025 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:43.065 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:43.07 +[BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[It] should observe add, update, and delete watch notifications on configmaps [Conformance] - test/e2e/apimachinery/watch.go:60 -STEP: creating a watch on configmaps with label A 07/29/23 16:57:29.096 -STEP: creating a watch on configmaps with label B 07/29/23 16:57:29.098 -STEP: creating a watch on configmaps with label A or B 07/29/23 16:57:29.1 -STEP: creating a configmap with label A and ensuring the correct watchers observe the notification 07/29/23 16:57:29.101 -Jul 29 16:57:29.110: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35416 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:57:29.110: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35416 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: modifying configmap A and ensuring the correct watchers observe the notification 07/29/23 16:57:29.111 -Jul 29 16:57:29.125: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35417 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:57:29.126: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35417 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: modifying configmap A again and ensuring the correct 
watchers observe the notification 07/29/23 16:57:29.126 -Jul 29 16:57:29.143: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35418 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:57:29.144: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35418 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: deleting configmap A and ensuring the correct watchers observe the notification 07/29/23 16:57:29.144 -Jul 29 16:57:29.155: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35419 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:57:29.155: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35419 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: creating a configmap with label B and ensuring the correct watchers observe the notification 07/29/23 16:57:29.155 -Jul 29 16:57:29.164: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-1623 4d424789-9d05-4842-829b-5bbdb673c967 35420 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:57:29.164: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-1623 4d424789-9d05-4842-829b-5bbdb673c967 35420 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: deleting configmap B and ensuring the correct watchers observe the notification 07/29/23 16:57:39.166 -Jul 29 16:57:39.179: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-1623 4d424789-9d05-4842-829b-5bbdb673c967 35453 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 
{"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:57:39.179: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-1623 4d424789-9d05-4842-829b-5bbdb673c967 35453 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -[AfterEach] [sig-api-machinery] Watchers +[It] should create a ResourceQuota and capture the life of a pod. [Conformance] + test/e2e/apimachinery/resource_quota.go:230 +STEP: Counting existing ResourceQuota 08/24/23 13:01:43.109 +STEP: Creating a ResourceQuota 08/24/23 13:01:48.116 +STEP: Ensuring resource quota status is calculated 08/24/23 13:01:48.13 +STEP: Creating a Pod that fits quota 08/24/23 13:01:50.137 +STEP: Ensuring ResourceQuota status captures the pod usage 08/24/23 13:01:50.164 +STEP: Not allowing a pod to be created that exceeds remaining quota 08/24/23 13:01:52.178 +STEP: Not allowing a pod to be created that exceeds remaining quota(validation on extended resources) 08/24/23 13:01:52.187 +STEP: Ensuring a pod cannot update its resource requirements 08/24/23 13:01:52.193 +STEP: Ensuring attempts to update pod resource requirements did not change quota usage 08/24/23 13:01:52.202 +STEP: Deleting the pod 08/24/23 13:01:54.212 +STEP: Ensuring resource quota status released the pod usage 08/24/23 13:01:54.24 +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:57:49.183: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Watchers +Aug 24 13:01:56.247: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "watch-1623" for this suite. 07/29/23 16:57:49.202 +STEP: Destroying namespace "resourcequota-2201" for this suite. 08/24/23 13:01:56.258 ------------------------------ -• [SLOW TEST] [20.165 seconds] -[sig-api-machinery] Watchers +• [SLOW TEST] [13.248 seconds] +[sig-api-machinery] ResourceQuota test/e2e/apimachinery/framework.go:23 - should observe add, update, and delete watch notifications on configmaps [Conformance] - test/e2e/apimachinery/watch.go:60 + should create a ResourceQuota and capture the life of a pod. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:230 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Watchers + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:57:29.053 - Jul 29 16:57:29.054: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename watch 07/29/23 16:57:29.056 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:29.086 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:29.092 - [BeforeEach] [sig-api-machinery] Watchers + STEP: Creating a kubernetes client 08/24/23 13:01:43.024 + Aug 24 13:01:43.024: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 13:01:43.025 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:43.065 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:43.07 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [It] should observe add, update, and delete watch notifications on configmaps [Conformance] - test/e2e/apimachinery/watch.go:60 - STEP: creating a watch on configmaps with label A 07/29/23 16:57:29.096 - STEP: creating a watch on configmaps with label B 07/29/23 16:57:29.098 - STEP: creating a watch on configmaps with label A or B 07/29/23 16:57:29.1 - STEP: creating a configmap with label A and ensuring the correct watchers observe the notification 07/29/23 16:57:29.101 - Jul 29 16:57:29.110: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35416 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:57:29.110: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35416 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} - STEP: modifying configmap A and ensuring the correct watchers observe the notification 07/29/23 16:57:29.111 - Jul 29 16:57:29.125: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35417 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:57:29.126: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35417 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 
1,},BinaryData:map[string][]byte{},Immutable:nil,} - STEP: modifying configmap A again and ensuring the correct watchers observe the notification 07/29/23 16:57:29.126 - Jul 29 16:57:29.143: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35418 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:57:29.144: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35418 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} - STEP: deleting configmap A and ensuring the correct watchers observe the notification 07/29/23 16:57:29.144 - Jul 29 16:57:29.155: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35419 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:57:29.155: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-1623 dab15b47-5697-4aff-854a-185a901a85f9 35419 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} - STEP: creating a configmap with label B and ensuring the correct watchers observe the notification 07/29/23 16:57:29.155 - Jul 29 16:57:29.164: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-1623 4d424789-9d05-4842-829b-5bbdb673c967 35420 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:57:29.164: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-1623 4d424789-9d05-4842-829b-5bbdb673c967 35420 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} - STEP: deleting configmap B and ensuring the correct watchers observe the notification 07/29/23 16:57:39.166 - Jul 29 16:57:39.179: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-1623 4d424789-9d05-4842-829b-5bbdb673c967 35453 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] 
[] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:57:39.179: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-1623 4d424789-9d05-4842-829b-5bbdb673c967 35453 0 2023-07-29 16:57:29 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2023-07-29 16:57:29 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} - [AfterEach] [sig-api-machinery] Watchers + [It] should create a ResourceQuota and capture the life of a pod. [Conformance] + test/e2e/apimachinery/resource_quota.go:230 + STEP: Counting existing ResourceQuota 08/24/23 13:01:43.109 + STEP: Creating a ResourceQuota 08/24/23 13:01:48.116 + STEP: Ensuring resource quota status is calculated 08/24/23 13:01:48.13 + STEP: Creating a Pod that fits quota 08/24/23 13:01:50.137 + STEP: Ensuring ResourceQuota status captures the pod usage 08/24/23 13:01:50.164 + STEP: Not allowing a pod to be created that exceeds remaining quota 08/24/23 13:01:52.178 + STEP: Not allowing a pod to be created that exceeds remaining quota(validation on extended resources) 08/24/23 13:01:52.187 + STEP: Ensuring a pod cannot update its resource requirements 08/24/23 13:01:52.193 + STEP: Ensuring attempts to update pod resource requirements did not change quota usage 08/24/23 13:01:52.202 + STEP: Deleting the pod 08/24/23 13:01:54.212 + STEP: Ensuring resource quota status released the pod usage 08/24/23 13:01:54.24 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:57:49.183: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Watchers + Aug 24 13:01:56.247: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "watch-1623" for this suite. 07/29/23 16:57:49.202 + STEP: Destroying namespace "resourcequota-2201" for this suite. 
08/24/23 13:01:56.258 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-cli] Kubectl client Proxy server - should support --unix-socket=/path [Conformance] - test/e2e/kubectl/kubectl.go:1812 -[BeforeEach] [sig-cli] Kubectl client +[sig-storage] EmptyDir wrapper volumes + should not conflict [Conformance] + test/e2e/storage/empty_dir_wrapper.go:67 +[BeforeEach] [sig-storage] EmptyDir wrapper volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:57:49.222 -Jul 29 16:57:49.223: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 16:57:49.226 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:49.251 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:49.258 -[BeforeEach] [sig-cli] Kubectl client +STEP: Creating a kubernetes client 08/24/23 13:01:56.302 +Aug 24 13:01:56.302: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir-wrapper 08/24/23 13:01:56.306 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:56.335 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:56.341 +[BeforeEach] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[It] should support --unix-socket=/path [Conformance] - test/e2e/kubectl/kubectl.go:1812 -STEP: Starting the proxy 07/29/23 16:57:49.264 -Jul 29 16:57:49.266: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3635 proxy --unix-socket=/tmp/kubectl-proxy-unix807589015/test' -STEP: retrieving proxy /api/ output 07/29/23 16:57:49.379 -[AfterEach] [sig-cli] Kubectl client +[It] should not conflict [Conformance] + test/e2e/storage/empty_dir_wrapper.go:67 +Aug 24 13:01:56.397: INFO: Waiting up to 5m0s for pod "pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad" in namespace "emptydir-wrapper-2893" to be "running and ready" +Aug 24 13:01:56.404: INFO: Pod "pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad": Phase="Pending", Reason="", readiness=false. Elapsed: 7.468149ms +Aug 24 13:01:56.404: INFO: The phase of Pod pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad is Pending, waiting for it to be Running (with Ready = true) +Aug 24 13:01:58.412: INFO: Pod "pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.015041016s +Aug 24 13:01:58.412: INFO: The phase of Pod pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad is Running (Ready = true) +Aug 24 13:01:58.412: INFO: Pod "pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad" satisfied condition "running and ready" +STEP: Cleaning up the secret 08/24/23 13:01:58.418 +STEP: Cleaning up the configmap 08/24/23 13:01:58.43 +STEP: Cleaning up the pod 08/24/23 13:01:58.449 +[AfterEach] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/node/init/init.go:32 -Jul 29 16:57:49.384: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client +Aug 24 13:01:58.470: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client +[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-3635" for this suite. 07/29/23 16:57:49.394 +STEP: Destroying namespace "emptydir-wrapper-2893" for this suite. 08/24/23 13:01:58.482 ------------------------------ -• [0.183 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Proxy server - test/e2e/kubectl/kubectl.go:1780 - should support --unix-socket=/path [Conformance] - test/e2e/kubectl/kubectl.go:1812 +• [2.199 seconds] +[sig-storage] EmptyDir wrapper volumes +test/e2e/storage/utils/framework.go:23 + should not conflict [Conformance] + test/e2e/storage/empty_dir_wrapper.go:67 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client + [BeforeEach] [sig-storage] EmptyDir wrapper volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:57:49.222 - Jul 29 16:57:49.223: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 16:57:49.226 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:49.251 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:49.258 - [BeforeEach] [sig-cli] Kubectl client + STEP: Creating a kubernetes client 08/24/23 13:01:56.302 + Aug 24 13:01:56.302: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir-wrapper 08/24/23 13:01:56.306 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:56.335 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:56.341 + [BeforeEach] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [It] should support --unix-socket=/path [Conformance] - test/e2e/kubectl/kubectl.go:1812 - STEP: Starting the proxy 07/29/23 16:57:49.264 - Jul 29 16:57:49.266: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-3635 proxy --unix-socket=/tmp/kubectl-proxy-unix807589015/test' - STEP: retrieving proxy /api/ output 07/29/23 16:57:49.379 - [AfterEach] [sig-cli] Kubectl client + [It] should not conflict [Conformance] + test/e2e/storage/empty_dir_wrapper.go:67 + Aug 24 13:01:56.397: INFO: Waiting up to 5m0s for pod "pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad" in namespace 
"emptydir-wrapper-2893" to be "running and ready" + Aug 24 13:01:56.404: INFO: Pod "pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad": Phase="Pending", Reason="", readiness=false. Elapsed: 7.468149ms + Aug 24 13:01:56.404: INFO: The phase of Pod pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad is Pending, waiting for it to be Running (with Ready = true) + Aug 24 13:01:58.412: INFO: Pod "pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad": Phase="Running", Reason="", readiness=true. Elapsed: 2.015041016s + Aug 24 13:01:58.412: INFO: The phase of Pod pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad is Running (Ready = true) + Aug 24 13:01:58.412: INFO: Pod "pod-secrets-7dda175a-70d9-4279-aac0-258039a463ad" satisfied condition "running and ready" + STEP: Cleaning up the secret 08/24/23 13:01:58.418 + STEP: Cleaning up the configmap 08/24/23 13:01:58.43 + STEP: Cleaning up the pod 08/24/23 13:01:58.449 + [AfterEach] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/node/init/init.go:32 - Jul 29 16:57:49.384: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client + Aug 24 13:01:58.470: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client + [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-3635" for this suite. 07/29/23 16:57:49.394 + STEP: Destroying namespace "emptydir-wrapper-2893" for this suite. 08/24/23 13:01:58.482 << End Captured GinkgoWriter Output ------------------------------ -[sig-apps] Job - should manage the lifecycle of a job [Conformance] - test/e2e/apps/job.go:703 -[BeforeEach] [sig-apps] Job +SS +------------------------------ +[sig-scheduling] SchedulerPreemption [Serial] + validates basic preemption works [Conformance] + test/e2e/scheduling/preemption.go:130 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:57:49.407 -Jul 29 16:57:49.407: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename job 07/29/23 16:57:49.411 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:49.437 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:49.441 -[BeforeEach] [sig-apps] Job +STEP: Creating a kubernetes client 08/24/23 13:01:58.502 +Aug 24 13:01:58.502: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename sched-preemption 08/24/23 13:01:58.505 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:58.531 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:58.536 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should manage the lifecycle of a job [Conformance] - test/e2e/apps/job.go:703 -STEP: Creating a suspended job 07/29/23 16:57:49.45 -STEP: Patching the Job 07/29/23 16:57:49.462 -STEP: Watching for Job to be patched 07/29/23 16:57:49.49 -Jul 29 16:57:49.494: INFO: Event ADDED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-job-label:e2e-h22fr] 
and annotations: map[batch.kubernetes.io/job-tracking:] -Jul 29 16:57:49.494: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking:] -Jul 29 16:57:49.494: INFO: Event MODIFIED found for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking:] -STEP: Updating the job 07/29/23 16:57:49.494 -STEP: Watching for Job to be updated 07/29/23 16:57:49.513 -Jul 29 16:57:49.517: INFO: Event MODIFIED found for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] -Jul 29 16:57:49.517: INFO: Found Job annotations: map[string]string{"batch.kubernetes.io/job-tracking":"", "updated":"true"} -STEP: Listing all Jobs with LabelSelector 07/29/23 16:57:49.517 -Jul 29 16:57:49.524: INFO: Job: e2e-h22fr as labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] -STEP: Waiting for job to complete 07/29/23 16:57:49.524 -STEP: Delete a job collection with a labelselector 07/29/23 16:57:59.534 -STEP: Watching for Job to be deleted 07/29/23 16:57:59.547 -Jul 29 16:57:59.553: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] -Jul 29 16:57:59.554: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] -Jul 29 16:57:59.554: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] -Jul 29 16:57:59.554: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] -Jul 29 16:57:59.554: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] -Jul 29 16:57:59.555: INFO: Event DELETED found for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] -STEP: Relist jobs to confirm deletion 07/29/23 16:57:59.555 -[AfterEach] [sig-apps] Job +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:97 +Aug 24 13:01:58.567: INFO: Waiting up to 1m0s for all nodes to be ready +Aug 24 13:02:58.636: INFO: Waiting for terminating namespaces to be deleted... +[It] validates basic preemption works [Conformance] + test/e2e/scheduling/preemption.go:130 +STEP: Create pods that use 4/5 of node resources. 
08/24/23 13:02:58.642 +Aug 24 13:02:58.678: INFO: Created pod: pod0-0-sched-preemption-low-priority +Aug 24 13:02:58.695: INFO: Created pod: pod0-1-sched-preemption-medium-priority +Aug 24 13:02:58.762: INFO: Created pod: pod1-0-sched-preemption-medium-priority +Aug 24 13:02:58.824: INFO: Created pod: pod1-1-sched-preemption-medium-priority +Aug 24 13:02:58.913: INFO: Created pod: pod2-0-sched-preemption-medium-priority +Aug 24 13:02:58.938: INFO: Created pod: pod2-1-sched-preemption-medium-priority +STEP: Wait for pods to be scheduled. 08/24/23 13:02:58.939 +Aug 24 13:02:58.939: INFO: Waiting up to 5m0s for pod "pod0-0-sched-preemption-low-priority" in namespace "sched-preemption-1660" to be "running" +Aug 24 13:02:58.961: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 22.406598ms +Aug 24 13:03:00.972: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 2.03305483s +Aug 24 13:03:02.970: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Running", Reason="", readiness=true. Elapsed: 4.030575975s +Aug 24 13:03:02.970: INFO: Pod "pod0-0-sched-preemption-low-priority" satisfied condition "running" +Aug 24 13:03:02.970: INFO: Waiting up to 5m0s for pod "pod0-1-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" +Aug 24 13:03:02.977: INFO: Pod "pod0-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 7.026974ms +Aug 24 13:03:02.977: INFO: Pod "pod0-1-sched-preemption-medium-priority" satisfied condition "running" +Aug 24 13:03:02.978: INFO: Waiting up to 5m0s for pod "pod1-0-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" +Aug 24 13:03:02.985: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 7.417893ms +Aug 24 13:03:02.985: INFO: Pod "pod1-0-sched-preemption-medium-priority" satisfied condition "running" +Aug 24 13:03:02.985: INFO: Waiting up to 5m0s for pod "pod1-1-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" +Aug 24 13:03:02.993: INFO: Pod "pod1-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 7.88458ms +Aug 24 13:03:02.994: INFO: Pod "pod1-1-sched-preemption-medium-priority" satisfied condition "running" +Aug 24 13:03:02.994: INFO: Waiting up to 5m0s for pod "pod2-0-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" +Aug 24 13:03:03.000: INFO: Pod "pod2-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 6.183406ms +Aug 24 13:03:03.000: INFO: Pod "pod2-0-sched-preemption-medium-priority" satisfied condition "running" +Aug 24 13:03:03.001: INFO: Waiting up to 5m0s for pod "pod2-1-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" +Aug 24 13:03:03.007: INFO: Pod "pod2-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 6.580249ms +Aug 24 13:03:03.007: INFO: Pod "pod2-1-sched-preemption-medium-priority" satisfied condition "running" +STEP: Run a high priority pod that has same requirements as that of lower priority pod 08/24/23 13:03:03.007 +Aug 24 13:03:03.026: INFO: Waiting up to 2m0s for pod "preemptor-pod" in namespace "sched-preemption-1660" to be "running" +Aug 24 13:03:03.034: INFO: Pod "preemptor-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 7.773728ms +Aug 24 13:03:05.046: INFO: Pod "preemptor-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019736367s +Aug 24 13:03:07.079: INFO: Pod "preemptor-pod": Phase="Running", Reason="", readiness=true. Elapsed: 4.052975079s +Aug 24 13:03:07.079: INFO: Pod "preemptor-pod" satisfied condition "running" +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:57:59.575: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Job +Aug 24 13:03:07.126: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:84 +[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Job +[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Job +[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "job-2155" for this suite. 07/29/23 16:57:59.585 +STEP: Destroying namespace "sched-preemption-1660" for this suite. 08/24/23 13:03:07.237 ------------------------------ -• [SLOW TEST] [10.195 seconds] -[sig-apps] Job -test/e2e/apps/framework.go:23 - should manage the lifecycle of a job [Conformance] - test/e2e/apps/job.go:703 +• [SLOW TEST] [68.750 seconds] +[sig-scheduling] SchedulerPreemption [Serial] +test/e2e/scheduling/framework.go:40 + validates basic preemption works [Conformance] + test/e2e/scheduling/preemption.go:130 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Job + [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:57:49.407 - Jul 29 16:57:49.407: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename job 07/29/23 16:57:49.411 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:49.437 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:49.441 - [BeforeEach] [sig-apps] Job + STEP: Creating a kubernetes client 08/24/23 13:01:58.502 + Aug 24 13:01:58.502: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename sched-preemption 08/24/23 13:01:58.505 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:01:58.531 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:01:58.536 + [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] should manage the lifecycle of a job [Conformance] - test/e2e/apps/job.go:703 - STEP: Creating a suspended job 07/29/23 16:57:49.45 - STEP: Patching the Job 07/29/23 16:57:49.462 - STEP: Watching for Job to be patched 07/29/23 16:57:49.49 - Jul 29 16:57:49.494: INFO: Event ADDED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking:] - Jul 29 16:57:49.494: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking:] - Jul 29 16:57:49.494: INFO: Event MODIFIED found for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched 
e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking:] - STEP: Updating the job 07/29/23 16:57:49.494 - STEP: Watching for Job to be updated 07/29/23 16:57:49.513 - Jul 29 16:57:49.517: INFO: Event MODIFIED found for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] - Jul 29 16:57:49.517: INFO: Found Job annotations: map[string]string{"batch.kubernetes.io/job-tracking":"", "updated":"true"} - STEP: Listing all Jobs with LabelSelector 07/29/23 16:57:49.517 - Jul 29 16:57:49.524: INFO: Job: e2e-h22fr as labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] - STEP: Waiting for job to complete 07/29/23 16:57:49.524 - STEP: Delete a job collection with a labelselector 07/29/23 16:57:59.534 - STEP: Watching for Job to be deleted 07/29/23 16:57:59.547 - Jul 29 16:57:59.553: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] - Jul 29 16:57:59.554: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] - Jul 29 16:57:59.554: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] - Jul 29 16:57:59.554: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] - Jul 29 16:57:59.554: INFO: Event MODIFIED observed for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] - Jul 29 16:57:59.555: INFO: Event DELETED found for Job e2e-h22fr in namespace job-2155 with labels: map[e2e-h22fr:patched e2e-job-label:e2e-h22fr] and annotations: map[batch.kubernetes.io/job-tracking: updated:true] - STEP: Relist jobs to confirm deletion 07/29/23 16:57:59.555 - [AfterEach] [sig-apps] Job + [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:97 + Aug 24 13:01:58.567: INFO: Waiting up to 1m0s for all nodes to be ready + Aug 24 13:02:58.636: INFO: Waiting for terminating namespaces to be deleted... + [It] validates basic preemption works [Conformance] + test/e2e/scheduling/preemption.go:130 + STEP: Create pods that use 4/5 of node resources. 08/24/23 13:02:58.642 + Aug 24 13:02:58.678: INFO: Created pod: pod0-0-sched-preemption-low-priority + Aug 24 13:02:58.695: INFO: Created pod: pod0-1-sched-preemption-medium-priority + Aug 24 13:02:58.762: INFO: Created pod: pod1-0-sched-preemption-medium-priority + Aug 24 13:02:58.824: INFO: Created pod: pod1-1-sched-preemption-medium-priority + Aug 24 13:02:58.913: INFO: Created pod: pod2-0-sched-preemption-medium-priority + Aug 24 13:02:58.938: INFO: Created pod: pod2-1-sched-preemption-medium-priority + STEP: Wait for pods to be scheduled. 08/24/23 13:02:58.939 + Aug 24 13:02:58.939: INFO: Waiting up to 5m0s for pod "pod0-0-sched-preemption-low-priority" in namespace "sched-preemption-1660" to be "running" + Aug 24 13:02:58.961: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. 
Elapsed: 22.406598ms + Aug 24 13:03:00.972: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 2.03305483s + Aug 24 13:03:02.970: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Running", Reason="", readiness=true. Elapsed: 4.030575975s + Aug 24 13:03:02.970: INFO: Pod "pod0-0-sched-preemption-low-priority" satisfied condition "running" + Aug 24 13:03:02.970: INFO: Waiting up to 5m0s for pod "pod0-1-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" + Aug 24 13:03:02.977: INFO: Pod "pod0-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 7.026974ms + Aug 24 13:03:02.977: INFO: Pod "pod0-1-sched-preemption-medium-priority" satisfied condition "running" + Aug 24 13:03:02.978: INFO: Waiting up to 5m0s for pod "pod1-0-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" + Aug 24 13:03:02.985: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 7.417893ms + Aug 24 13:03:02.985: INFO: Pod "pod1-0-sched-preemption-medium-priority" satisfied condition "running" + Aug 24 13:03:02.985: INFO: Waiting up to 5m0s for pod "pod1-1-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" + Aug 24 13:03:02.993: INFO: Pod "pod1-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 7.88458ms + Aug 24 13:03:02.994: INFO: Pod "pod1-1-sched-preemption-medium-priority" satisfied condition "running" + Aug 24 13:03:02.994: INFO: Waiting up to 5m0s for pod "pod2-0-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" + Aug 24 13:03:03.000: INFO: Pod "pod2-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 6.183406ms + Aug 24 13:03:03.000: INFO: Pod "pod2-0-sched-preemption-medium-priority" satisfied condition "running" + Aug 24 13:03:03.001: INFO: Waiting up to 5m0s for pod "pod2-1-sched-preemption-medium-priority" in namespace "sched-preemption-1660" to be "running" + Aug 24 13:03:03.007: INFO: Pod "pod2-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 6.580249ms + Aug 24 13:03:03.007: INFO: Pod "pod2-1-sched-preemption-medium-priority" satisfied condition "running" + STEP: Run a high priority pod that has same requirements as that of lower priority pod 08/24/23 13:03:03.007 + Aug 24 13:03:03.026: INFO: Waiting up to 2m0s for pod "preemptor-pod" in namespace "sched-preemption-1660" to be "running" + Aug 24 13:03:03.034: INFO: Pod "preemptor-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 7.773728ms + Aug 24 13:03:05.046: INFO: Pod "preemptor-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019736367s + Aug 24 13:03:07.079: INFO: Pod "preemptor-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.052975079s + Aug 24 13:03:07.079: INFO: Pod "preemptor-pod" satisfied condition "running" + [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 16:57:59.575: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Job + Aug 24 13:03:07.126: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + test/e2e/scheduling/preemption.go:84 + [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Job + [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Job + [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "job-2155" for this suite. 07/29/23 16:57:59.585 + STEP: Destroying namespace "sched-preemption-1660" for this suite. 08/24/23 13:03:07.237 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSS ------------------------------ -[sig-storage] EmptyDir wrapper volumes - should not cause race condition when used for configmaps [Serial] [Conformance] - test/e2e/storage/empty_dir_wrapper.go:189 -[BeforeEach] [sig-storage] EmptyDir wrapper volumes +[sig-node] Security Context + should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] + test/e2e/node/security_context.go:129 +[BeforeEach] [sig-node] Security Context set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:57:59.612 -Jul 29 16:57:59.612: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir-wrapper 07/29/23 16:57:59.615 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:59.664 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:59.667 -[BeforeEach] [sig-storage] EmptyDir wrapper volumes +STEP: Creating a kubernetes client 08/24/23 13:03:07.252 +Aug 24 13:03:07.252: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename security-context 08/24/23 13:03:07.255 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:07.284 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:07.288 +[BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 -[It] should not cause race condition when used for configmaps [Serial] [Conformance] - test/e2e/storage/empty_dir_wrapper.go:189 -STEP: Creating 50 configmaps 07/29/23 16:57:59.671 -STEP: Creating RC which spawns configmap-volume pods 07/29/23 16:58:00.107 -Jul 29 16:58:00.216: INFO: Pod name wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826: Found 0 pods out of 5 -Jul 29 16:58:05.233: INFO: Pod name wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826: Found 5 pods out of 5 -STEP: Ensuring each pod is running 07/29/23 16:58:05.233 -Jul 29 16:58:05.233: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:05.242: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.716053ms -Jul 29 16:58:07.256: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022218902s -Jul 29 16:58:09.255: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. Elapsed: 4.021455644s -Jul 29 16:58:11.255: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. Elapsed: 6.021332517s -Jul 29 16:58:13.262: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. Elapsed: 8.028587732s -Jul 29 16:58:15.252: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Running", Reason="", readiness=true. Elapsed: 10.018222829s -Jul 29 16:58:15.252: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7" satisfied condition "running" -Jul 29 16:58:15.252: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-fq6g9" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:15.257: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-fq6g9": Phase="Running", Reason="", readiness=true. Elapsed: 5.454505ms -Jul 29 16:58:15.258: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-fq6g9" satisfied condition "running" -Jul 29 16:58:15.258: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-trmkj" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:15.264: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-trmkj": Phase="Pending", Reason="", readiness=false. Elapsed: 5.935022ms -Jul 29 16:58:17.279: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-trmkj": Phase="Running", Reason="", readiness=true. Elapsed: 2.021355484s -Jul 29 16:58:17.279: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-trmkj" satisfied condition "running" -Jul 29 16:58:17.279: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zmxv4" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:17.286: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zmxv4": Phase="Running", Reason="", readiness=true. Elapsed: 6.484361ms -Jul 29 16:58:17.286: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zmxv4" satisfied condition "running" -Jul 29 16:58:17.286: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zvqhs" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:17.293: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zvqhs": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.241288ms -Jul 29 16:58:17.293: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zvqhs" satisfied condition "running" -STEP: deleting ReplicationController wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826 in namespace emptydir-wrapper-67, will wait for the garbage collector to delete the pods 07/29/23 16:58:17.294 -Jul 29 16:58:17.375: INFO: Deleting ReplicationController wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826 took: 20.368007ms -Jul 29 16:58:17.576: INFO: Terminating ReplicationController wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826 pods took: 200.946553ms -STEP: Creating RC which spawns configmap-volume pods 07/29/23 16:58:20.387 -Jul 29 16:58:20.429: INFO: Pod name wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e: Found 0 pods out of 5 -Jul 29 16:58:25.448: INFO: Pod name wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e: Found 5 pods out of 5 -STEP: Ensuring each pod is running 07/29/23 16:58:25.448 -Jul 29 16:58:25.449: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:25.455: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 6.443146ms -Jul 29 16:58:27.463: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014312925s -Jul 29 16:58:29.468: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018888682s -Jul 29 16:58:31.467: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 6.018236213s -Jul 29 16:58:33.470: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 8.021440542s -Jul 29 16:58:35.466: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 10.017087456s -Jul 29 16:58:37.467: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Running", Reason="", readiness=true. Elapsed: 12.018581398s -Jul 29 16:58:37.468: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb" satisfied condition "running" -Jul 29 16:58:37.468: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-c85t2" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:37.478: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-c85t2": Phase="Running", Reason="", readiness=true. Elapsed: 9.327331ms -Jul 29 16:58:37.478: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-c85t2" satisfied condition "running" -Jul 29 16:58:37.478: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-h5gz5" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:37.485: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-h5gz5": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.102222ms -Jul 29 16:58:37.485: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-h5gz5" satisfied condition "running" -Jul 29 16:58:37.485: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-nhfmk" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:37.492: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-nhfmk": Phase="Running", Reason="", readiness=true. Elapsed: 7.236348ms -Jul 29 16:58:37.492: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-nhfmk" satisfied condition "running" -Jul 29 16:58:37.493: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-rlv65" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:37.499: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-rlv65": Phase="Running", Reason="", readiness=true. Elapsed: 6.742476ms -Jul 29 16:58:37.500: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-rlv65" satisfied condition "running" -STEP: deleting ReplicationController wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e in namespace emptydir-wrapper-67, will wait for the garbage collector to delete the pods 07/29/23 16:58:37.5 -Jul 29 16:58:37.569: INFO: Deleting ReplicationController wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e took: 12.076048ms -Jul 29 16:58:37.669: INFO: Terminating ReplicationController wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e pods took: 100.461306ms -STEP: Creating RC which spawns configmap-volume pods 07/29/23 16:58:40.187 -Jul 29 16:58:40.211: INFO: Pod name wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf: Found 0 pods out of 5 -Jul 29 16:58:45.222: INFO: Pod name wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf: Found 5 pods out of 5 -STEP: Ensuring each pod is running 07/29/23 16:58:45.222 -Jul 29 16:58:45.223: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:45.231: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 7.804844ms -Jul 29 16:58:47.240: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017416461s -Jul 29 16:58:49.243: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 4.019668232s -Jul 29 16:58:51.240: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 6.016995454s -Jul 29 16:58:53.290: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 8.067177265s -Jul 29 16:58:55.245: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Running", Reason="", readiness=true. Elapsed: 10.022196891s -Jul 29 16:58:55.245: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l" satisfied condition "running" -Jul 29 16:58:55.245: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-gdd75" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:55.254: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-gdd75": Phase="Running", Reason="", readiness=true. 
Elapsed: 8.763103ms -Jul 29 16:58:55.254: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-gdd75" satisfied condition "running" -Jul 29 16:58:55.254: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-t9lmk" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:55.260: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-t9lmk": Phase="Running", Reason="", readiness=true. Elapsed: 5.528561ms -Jul 29 16:58:55.260: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-t9lmk" satisfied condition "running" -Jul 29 16:58:55.260: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-w664r" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:55.267: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-w664r": Phase="Running", Reason="", readiness=true. Elapsed: 6.833042ms -Jul 29 16:58:55.267: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-w664r" satisfied condition "running" -Jul 29 16:58:55.267: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-x8bvp" in namespace "emptydir-wrapper-67" to be "running" -Jul 29 16:58:55.273: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-x8bvp": Phase="Running", Reason="", readiness=true. Elapsed: 5.929138ms -Jul 29 16:58:55.273: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-x8bvp" satisfied condition "running" -STEP: deleting ReplicationController wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf in namespace emptydir-wrapper-67, will wait for the garbage collector to delete the pods 07/29/23 16:58:55.273 -Jul 29 16:58:55.340: INFO: Deleting ReplicationController wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf took: 10.392532ms -Jul 29 16:58:55.541: INFO: Terminating ReplicationController wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf pods took: 201.087556ms -STEP: Cleaning up the configMaps 07/29/23 16:58:58.842 -[AfterEach] [sig-storage] EmptyDir wrapper volumes +[It] should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] + test/e2e/node/security_context.go:129 +STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser 08/24/23 13:03:07.293 +Aug 24 13:03:07.307: INFO: Waiting up to 5m0s for pod "security-context-a974adab-c395-418d-83fe-3ead041d589a" in namespace "security-context-1864" to be "Succeeded or Failed" +Aug 24 13:03:07.315: INFO: Pod "security-context-a974adab-c395-418d-83fe-3ead041d589a": Phase="Pending", Reason="", readiness=false. Elapsed: 7.744175ms +Aug 24 13:03:09.325: INFO: Pod "security-context-a974adab-c395-418d-83fe-3ead041d589a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017094852s +Aug 24 13:03:11.325: INFO: Pod "security-context-a974adab-c395-418d-83fe-3ead041d589a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017152206s +STEP: Saw pod success 08/24/23 13:03:11.325 +Aug 24 13:03:11.326: INFO: Pod "security-context-a974adab-c395-418d-83fe-3ead041d589a" satisfied condition "Succeeded or Failed" +Aug 24 13:03:11.335: INFO: Trying to get logs from node pe9deep4seen-3 pod security-context-a974adab-c395-418d-83fe-3ead041d589a container test-container: +STEP: delete the pod 08/24/23 13:03:11.366 +Aug 24 13:03:11.397: INFO: Waiting for pod security-context-a974adab-c395-418d-83fe-3ead041d589a to disappear +Aug 24 13:03:11.443: INFO: Pod security-context-a974adab-c395-418d-83fe-3ead041d589a no longer exists +[AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 -Jul 29 16:58:59.349: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes +Aug 24 13:03:11.444: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes +[DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes +[DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-wrapper-67" for this suite. 07/29/23 16:58:59.358 +STEP: Destroying namespace "security-context-1864" for this suite. 08/24/23 13:03:11.467 ------------------------------ -• [SLOW TEST] [59.764 seconds] -[sig-storage] EmptyDir wrapper volumes -test/e2e/storage/utils/framework.go:23 - should not cause race condition when used for configmaps [Serial] [Conformance] - test/e2e/storage/empty_dir_wrapper.go:189 +• [4.237 seconds] +[sig-node] Security Context +test/e2e/node/framework.go:23 + should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] + test/e2e/node/security_context.go:129 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir wrapper volumes + [BeforeEach] [sig-node] Security Context set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:57:59.612 - Jul 29 16:57:59.612: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir-wrapper 07/29/23 16:57:59.615 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:57:59.664 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:57:59.667 - [BeforeEach] [sig-storage] EmptyDir wrapper volumes + STEP: Creating a kubernetes client 08/24/23 13:03:07.252 + Aug 24 13:03:07.252: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename security-context 08/24/23 13:03:07.255 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:07.284 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:07.288 + [BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 - [It] should not cause race condition when used for configmaps [Serial] [Conformance] - test/e2e/storage/empty_dir_wrapper.go:189 - STEP: Creating 50 configmaps 07/29/23 16:57:59.671 - STEP: Creating RC which spawns configmap-volume pods 07/29/23 16:58:00.107 - Jul 29 16:58:00.216: INFO: Pod name wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826: Found 0 pods out of 5 - Jul 29 16:58:05.233: INFO: Pod name 
wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826: Found 5 pods out of 5 - STEP: Ensuring each pod is running 07/29/23 16:58:05.233 - Jul 29 16:58:05.233: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:05.242: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. Elapsed: 8.716053ms - Jul 29 16:58:07.256: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022218902s - Jul 29 16:58:09.255: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. Elapsed: 4.021455644s - Jul 29 16:58:11.255: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. Elapsed: 6.021332517s - Jul 29 16:58:13.262: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Pending", Reason="", readiness=false. Elapsed: 8.028587732s - Jul 29 16:58:15.252: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7": Phase="Running", Reason="", readiness=true. Elapsed: 10.018222829s - Jul 29 16:58:15.252: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-9bfg7" satisfied condition "running" - Jul 29 16:58:15.252: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-fq6g9" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:15.257: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-fq6g9": Phase="Running", Reason="", readiness=true. Elapsed: 5.454505ms - Jul 29 16:58:15.258: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-fq6g9" satisfied condition "running" - Jul 29 16:58:15.258: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-trmkj" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:15.264: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-trmkj": Phase="Pending", Reason="", readiness=false. Elapsed: 5.935022ms - Jul 29 16:58:17.279: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-trmkj": Phase="Running", Reason="", readiness=true. Elapsed: 2.021355484s - Jul 29 16:58:17.279: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-trmkj" satisfied condition "running" - Jul 29 16:58:17.279: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zmxv4" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:17.286: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zmxv4": Phase="Running", Reason="", readiness=true. Elapsed: 6.484361ms - Jul 29 16:58:17.286: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zmxv4" satisfied condition "running" - Jul 29 16:58:17.286: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zvqhs" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:17.293: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zvqhs": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.241288ms - Jul 29 16:58:17.293: INFO: Pod "wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826-zvqhs" satisfied condition "running" - STEP: deleting ReplicationController wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826 in namespace emptydir-wrapper-67, will wait for the garbage collector to delete the pods 07/29/23 16:58:17.294 - Jul 29 16:58:17.375: INFO: Deleting ReplicationController wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826 took: 20.368007ms - Jul 29 16:58:17.576: INFO: Terminating ReplicationController wrapped-volume-race-ae983807-4a9c-4520-96b5-bded3cd19826 pods took: 200.946553ms - STEP: Creating RC which spawns configmap-volume pods 07/29/23 16:58:20.387 - Jul 29 16:58:20.429: INFO: Pod name wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e: Found 0 pods out of 5 - Jul 29 16:58:25.448: INFO: Pod name wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e: Found 5 pods out of 5 - STEP: Ensuring each pod is running 07/29/23 16:58:25.448 - Jul 29 16:58:25.449: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:25.455: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 6.443146ms - Jul 29 16:58:27.463: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014312925s - Jul 29 16:58:29.468: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 4.018888682s - Jul 29 16:58:31.467: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 6.018236213s - Jul 29 16:58:33.470: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 8.021440542s - Jul 29 16:58:35.466: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Pending", Reason="", readiness=false. Elapsed: 10.017087456s - Jul 29 16:58:37.467: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb": Phase="Running", Reason="", readiness=true. Elapsed: 12.018581398s - Jul 29 16:58:37.468: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-9d6rb" satisfied condition "running" - Jul 29 16:58:37.468: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-c85t2" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:37.478: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-c85t2": Phase="Running", Reason="", readiness=true. Elapsed: 9.327331ms - Jul 29 16:58:37.478: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-c85t2" satisfied condition "running" - Jul 29 16:58:37.478: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-h5gz5" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:37.485: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-h5gz5": Phase="Running", Reason="", readiness=true. 
Elapsed: 7.102222ms - Jul 29 16:58:37.485: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-h5gz5" satisfied condition "running" - Jul 29 16:58:37.485: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-nhfmk" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:37.492: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-nhfmk": Phase="Running", Reason="", readiness=true. Elapsed: 7.236348ms - Jul 29 16:58:37.492: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-nhfmk" satisfied condition "running" - Jul 29 16:58:37.493: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-rlv65" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:37.499: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-rlv65": Phase="Running", Reason="", readiness=true. Elapsed: 6.742476ms - Jul 29 16:58:37.500: INFO: Pod "wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e-rlv65" satisfied condition "running" - STEP: deleting ReplicationController wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e in namespace emptydir-wrapper-67, will wait for the garbage collector to delete the pods 07/29/23 16:58:37.5 - Jul 29 16:58:37.569: INFO: Deleting ReplicationController wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e took: 12.076048ms - Jul 29 16:58:37.669: INFO: Terminating ReplicationController wrapped-volume-race-ae231e50-5471-4e72-b17d-c0bcffe5962e pods took: 100.461306ms - STEP: Creating RC which spawns configmap-volume pods 07/29/23 16:58:40.187 - Jul 29 16:58:40.211: INFO: Pod name wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf: Found 0 pods out of 5 - Jul 29 16:58:45.222: INFO: Pod name wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf: Found 5 pods out of 5 - STEP: Ensuring each pod is running 07/29/23 16:58:45.222 - Jul 29 16:58:45.223: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:45.231: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 7.804844ms - Jul 29 16:58:47.240: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017416461s - Jul 29 16:58:49.243: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 4.019668232s - Jul 29 16:58:51.240: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 6.016995454s - Jul 29 16:58:53.290: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Pending", Reason="", readiness=false. Elapsed: 8.067177265s - Jul 29 16:58:55.245: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l": Phase="Running", Reason="", readiness=true. Elapsed: 10.022196891s - Jul 29 16:58:55.245: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-dl66l" satisfied condition "running" - Jul 29 16:58:55.245: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-gdd75" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:55.254: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-gdd75": Phase="Running", Reason="", readiness=true. 
Elapsed: 8.763103ms - Jul 29 16:58:55.254: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-gdd75" satisfied condition "running" - Jul 29 16:58:55.254: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-t9lmk" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:55.260: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-t9lmk": Phase="Running", Reason="", readiness=true. Elapsed: 5.528561ms - Jul 29 16:58:55.260: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-t9lmk" satisfied condition "running" - Jul 29 16:58:55.260: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-w664r" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:55.267: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-w664r": Phase="Running", Reason="", readiness=true. Elapsed: 6.833042ms - Jul 29 16:58:55.267: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-w664r" satisfied condition "running" - Jul 29 16:58:55.267: INFO: Waiting up to 5m0s for pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-x8bvp" in namespace "emptydir-wrapper-67" to be "running" - Jul 29 16:58:55.273: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-x8bvp": Phase="Running", Reason="", readiness=true. Elapsed: 5.929138ms - Jul 29 16:58:55.273: INFO: Pod "wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf-x8bvp" satisfied condition "running" - STEP: deleting ReplicationController wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf in namespace emptydir-wrapper-67, will wait for the garbage collector to delete the pods 07/29/23 16:58:55.273 - Jul 29 16:58:55.340: INFO: Deleting ReplicationController wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf took: 10.392532ms - Jul 29 16:58:55.541: INFO: Terminating ReplicationController wrapped-volume-race-e73e8574-b68e-4655-8725-bf8acdb50edf pods took: 201.087556ms - STEP: Cleaning up the configMaps 07/29/23 16:58:58.842 - [AfterEach] [sig-storage] EmptyDir wrapper volumes + [It] should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] + test/e2e/node/security_context.go:129 + STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser 08/24/23 13:03:07.293 + Aug 24 13:03:07.307: INFO: Waiting up to 5m0s for pod "security-context-a974adab-c395-418d-83fe-3ead041d589a" in namespace "security-context-1864" to be "Succeeded or Failed" + Aug 24 13:03:07.315: INFO: Pod "security-context-a974adab-c395-418d-83fe-3ead041d589a": Phase="Pending", Reason="", readiness=false. Elapsed: 7.744175ms + Aug 24 13:03:09.325: INFO: Pod "security-context-a974adab-c395-418d-83fe-3ead041d589a": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017094852s + Aug 24 13:03:11.325: INFO: Pod "security-context-a974adab-c395-418d-83fe-3ead041d589a": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017152206s + STEP: Saw pod success 08/24/23 13:03:11.325 + Aug 24 13:03:11.326: INFO: Pod "security-context-a974adab-c395-418d-83fe-3ead041d589a" satisfied condition "Succeeded or Failed" + Aug 24 13:03:11.335: INFO: Trying to get logs from node pe9deep4seen-3 pod security-context-a974adab-c395-418d-83fe-3ead041d589a container test-container: + STEP: delete the pod 08/24/23 13:03:11.366 + Aug 24 13:03:11.397: INFO: Waiting for pod security-context-a974adab-c395-418d-83fe-3ead041d589a to disappear + Aug 24 13:03:11.443: INFO: Pod security-context-a974adab-c395-418d-83fe-3ead041d589a no longer exists + [AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 - Jul 29 16:58:59.349: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes + Aug 24 13:03:11.444: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes + [DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir wrapper volumes + [DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-wrapper-67" for this suite. 07/29/23 16:58:59.358 + STEP: Destroying namespace "security-context-1864" for this suite. 08/24/23 13:03:11.467 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS +SSSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should be able to deny custom resource creation, update and deletion [Conformance] - test/e2e/apimachinery/webhook.go:221 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-node] PodTemplates + should run the lifecycle of PodTemplates [Conformance] + test/e2e/common/node/podtemplates.go:53 +[BeforeEach] [sig-node] PodTemplates set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:58:59.378 -Jul 29 16:58:59.378: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 16:58:59.382 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:58:59.405 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:58:59.411 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 13:03:11.494 +Aug 24 13:03:11.494: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename podtemplate 08/24/23 13:03:11.496 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:11.535 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:11.542 +[BeforeEach] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 16:58:59.434 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:59:00.112 -STEP: Deploying the webhook pod 07/29/23 16:59:00.127 -STEP: Wait for the deployment to be ready 07/29/23 16:59:00.147 -Jul 29 16:59:00.171: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -Jul 29 
16:59:02.192: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 16, 59, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 16, 59, 0, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 16, 59, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 16, 59, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-865554f4d9\" is progressing."}}, CollisionCount:(*int32)(nil)} -STEP: Deploying the webhook service 07/29/23 16:59:04.202 -STEP: Verifying the service has paired with the endpoint 07/29/23 16:59:04.222 -Jul 29 16:59:05.222: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should be able to deny custom resource creation, update and deletion [Conformance] - test/e2e/apimachinery/webhook.go:221 -Jul 29 16:59:05.231: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Registering the custom resource webhook via the AdmissionRegistration API 07/29/23 16:59:05.751 -STEP: Creating a custom resource that should be denied by the webhook 07/29/23 16:59:05.782 -STEP: Creating a custom resource whose deletion would be denied by the webhook 07/29/23 16:59:07.951 -STEP: Updating the custom resource with disallowed data should be denied 07/29/23 16:59:07.965 -STEP: Deleting the custom resource should be denied 07/29/23 16:59:07.983 -STEP: Remove the offending key and value from the custom resource data 07/29/23 16:59:07.993 -STEP: Deleting the updated custom resource should be successful 07/29/23 16:59:08.009 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[It] should run the lifecycle of PodTemplates [Conformance] + test/e2e/common/node/podtemplates.go:53 +[AfterEach] [sig-node] PodTemplates test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:08.568: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 13:03:11.663: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] PodTemplates dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] PodTemplates tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-6167" for this suite. 07/29/23 16:59:08.73 -STEP: Destroying namespace "webhook-6167-markers" for this suite. 07/29/23 16:59:08.766 +STEP: Destroying namespace "podtemplate-6976" for this suite. 
08/24/23 13:03:11.674 ------------------------------ -• [SLOW TEST] [9.482 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should be able to deny custom resource creation, update and deletion [Conformance] - test/e2e/apimachinery/webhook.go:221 +• [0.199 seconds] +[sig-node] PodTemplates +test/e2e/common/node/framework.go:23 + should run the lifecycle of PodTemplates [Conformance] + test/e2e/common/node/podtemplates.go:53 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] PodTemplates set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:58:59.378 - Jul 29 16:58:59.378: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 16:58:59.382 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:58:59.405 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:58:59.411 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 13:03:11.494 + Aug 24 13:03:11.494: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename podtemplate 08/24/23 13:03:11.496 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:11.535 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:11.542 + [BeforeEach] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 16:58:59.434 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:59:00.112 - STEP: Deploying the webhook pod 07/29/23 16:59:00.127 - STEP: Wait for the deployment to be ready 07/29/23 16:59:00.147 - Jul 29 16:59:00.171: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set - Jul 29 16:59:02.192: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:time.Date(2023, time.July, 29, 16, 59, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 16, 59, 0, 0, time.Local), Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 16, 59, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 16, 59, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-865554f4d9\" is progressing."}}, CollisionCount:(*int32)(nil)} - STEP: Deploying the webhook service 07/29/23 16:59:04.202 - STEP: Verifying the service has paired with the endpoint 07/29/23 16:59:04.222 - Jul 29 16:59:05.222: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should be able to deny custom resource creation, update and deletion [Conformance] - test/e2e/apimachinery/webhook.go:221 - Jul 29 16:59:05.231: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Registering the custom resource webhook via the AdmissionRegistration API 07/29/23 
16:59:05.751 - STEP: Creating a custom resource that should be denied by the webhook 07/29/23 16:59:05.782 - STEP: Creating a custom resource whose deletion would be denied by the webhook 07/29/23 16:59:07.951 - STEP: Updating the custom resource with disallowed data should be denied 07/29/23 16:59:07.965 - STEP: Deleting the custom resource should be denied 07/29/23 16:59:07.983 - STEP: Remove the offending key and value from the custom resource data 07/29/23 16:59:07.993 - STEP: Deleting the updated custom resource should be successful 07/29/23 16:59:08.009 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [It] should run the lifecycle of PodTemplates [Conformance] + test/e2e/common/node/podtemplates.go:53 + [AfterEach] [sig-node] PodTemplates test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:08.568: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 13:03:11.663: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] PodTemplates dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] PodTemplates tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-6167" for this suite. 07/29/23 16:59:08.73 - STEP: Destroying namespace "webhook-6167-markers" for this suite. 07/29/23 16:59:08.766 + STEP: Destroying namespace "podtemplate-6976" for this suite. 
08/24/23 13:03:11.674 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Discovery - should validate PreferredVersion for each APIGroup [Conformance] - test/e2e/apimachinery/discovery.go:122 -[BeforeEach] [sig-api-machinery] Discovery +[sig-apps] ReplicaSet + should validate Replicaset Status endpoints [Conformance] + test/e2e/apps/replica_set.go:176 +[BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:08.866 -Jul 29 16:59:08.866: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename discovery 07/29/23 16:59:08.878 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:08.917 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:08.92 -[BeforeEach] [sig-api-machinery] Discovery +STEP: Creating a kubernetes client 08/24/23 13:03:11.695 +Aug 24 13:03:11.695: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replicaset 08/24/23 13:03:11.697 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:11.747 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:11.76 +[BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] Discovery - test/e2e/apimachinery/discovery.go:43 -STEP: Setting up server cert 07/29/23 16:59:08.93 -[It] should validate PreferredVersion for each APIGroup [Conformance] - test/e2e/apimachinery/discovery.go:122 -Jul 29 16:59:09.546: INFO: Checking APIGroup: apiregistration.k8s.io -Jul 29 16:59:09.548: INFO: PreferredVersion.GroupVersion: apiregistration.k8s.io/v1 -Jul 29 16:59:09.548: INFO: Versions found [{apiregistration.k8s.io/v1 v1}] -Jul 29 16:59:09.548: INFO: apiregistration.k8s.io/v1 matches apiregistration.k8s.io/v1 -Jul 29 16:59:09.548: INFO: Checking APIGroup: apps -Jul 29 16:59:09.550: INFO: PreferredVersion.GroupVersion: apps/v1 -Jul 29 16:59:09.550: INFO: Versions found [{apps/v1 v1}] -Jul 29 16:59:09.550: INFO: apps/v1 matches apps/v1 -Jul 29 16:59:09.550: INFO: Checking APIGroup: events.k8s.io -Jul 29 16:59:09.551: INFO: PreferredVersion.GroupVersion: events.k8s.io/v1 -Jul 29 16:59:09.551: INFO: Versions found [{events.k8s.io/v1 v1}] -Jul 29 16:59:09.551: INFO: events.k8s.io/v1 matches events.k8s.io/v1 -Jul 29 16:59:09.551: INFO: Checking APIGroup: authentication.k8s.io -Jul 29 16:59:09.553: INFO: PreferredVersion.GroupVersion: authentication.k8s.io/v1 -Jul 29 16:59:09.553: INFO: Versions found [{authentication.k8s.io/v1 v1}] -Jul 29 16:59:09.553: INFO: authentication.k8s.io/v1 matches authentication.k8s.io/v1 -Jul 29 16:59:09.553: INFO: Checking APIGroup: authorization.k8s.io -Jul 29 16:59:09.555: INFO: PreferredVersion.GroupVersion: authorization.k8s.io/v1 -Jul 29 16:59:09.555: INFO: Versions found [{authorization.k8s.io/v1 v1}] -Jul 29 16:59:09.555: INFO: authorization.k8s.io/v1 matches authorization.k8s.io/v1 -Jul 29 16:59:09.555: INFO: Checking APIGroup: autoscaling -Jul 29 16:59:09.557: INFO: PreferredVersion.GroupVersion: autoscaling/v2 -Jul 29 16:59:09.557: INFO: Versions found [{autoscaling/v2 v2} {autoscaling/v1 v1}] -Jul 29 16:59:09.557: INFO: autoscaling/v2 matches autoscaling/v2 -Jul 29 16:59:09.557: INFO: Checking APIGroup: batch -Jul 29 16:59:09.558: INFO: PreferredVersion.GroupVersion: batch/v1 -Jul 29 
16:59:09.558: INFO: Versions found [{batch/v1 v1}] -Jul 29 16:59:09.558: INFO: batch/v1 matches batch/v1 -Jul 29 16:59:09.558: INFO: Checking APIGroup: certificates.k8s.io -Jul 29 16:59:09.560: INFO: PreferredVersion.GroupVersion: certificates.k8s.io/v1 -Jul 29 16:59:09.560: INFO: Versions found [{certificates.k8s.io/v1 v1}] -Jul 29 16:59:09.560: INFO: certificates.k8s.io/v1 matches certificates.k8s.io/v1 -Jul 29 16:59:09.560: INFO: Checking APIGroup: networking.k8s.io -Jul 29 16:59:09.562: INFO: PreferredVersion.GroupVersion: networking.k8s.io/v1 -Jul 29 16:59:09.562: INFO: Versions found [{networking.k8s.io/v1 v1}] -Jul 29 16:59:09.562: INFO: networking.k8s.io/v1 matches networking.k8s.io/v1 -Jul 29 16:59:09.562: INFO: Checking APIGroup: policy -Jul 29 16:59:09.564: INFO: PreferredVersion.GroupVersion: policy/v1 -Jul 29 16:59:09.564: INFO: Versions found [{policy/v1 v1}] -Jul 29 16:59:09.564: INFO: policy/v1 matches policy/v1 -Jul 29 16:59:09.564: INFO: Checking APIGroup: rbac.authorization.k8s.io -Jul 29 16:59:09.565: INFO: PreferredVersion.GroupVersion: rbac.authorization.k8s.io/v1 -Jul 29 16:59:09.565: INFO: Versions found [{rbac.authorization.k8s.io/v1 v1}] -Jul 29 16:59:09.566: INFO: rbac.authorization.k8s.io/v1 matches rbac.authorization.k8s.io/v1 -Jul 29 16:59:09.568: INFO: Checking APIGroup: storage.k8s.io -Jul 29 16:59:09.573: INFO: PreferredVersion.GroupVersion: storage.k8s.io/v1 -Jul 29 16:59:09.573: INFO: Versions found [{storage.k8s.io/v1 v1} {storage.k8s.io/v1beta1 v1beta1}] -Jul 29 16:59:09.573: INFO: storage.k8s.io/v1 matches storage.k8s.io/v1 -Jul 29 16:59:09.574: INFO: Checking APIGroup: admissionregistration.k8s.io -Jul 29 16:59:09.576: INFO: PreferredVersion.GroupVersion: admissionregistration.k8s.io/v1 -Jul 29 16:59:09.576: INFO: Versions found [{admissionregistration.k8s.io/v1 v1}] -Jul 29 16:59:09.576: INFO: admissionregistration.k8s.io/v1 matches admissionregistration.k8s.io/v1 -Jul 29 16:59:09.577: INFO: Checking APIGroup: apiextensions.k8s.io -Jul 29 16:59:09.579: INFO: PreferredVersion.GroupVersion: apiextensions.k8s.io/v1 -Jul 29 16:59:09.579: INFO: Versions found [{apiextensions.k8s.io/v1 v1}] -Jul 29 16:59:09.579: INFO: apiextensions.k8s.io/v1 matches apiextensions.k8s.io/v1 -Jul 29 16:59:09.579: INFO: Checking APIGroup: scheduling.k8s.io -Jul 29 16:59:09.583: INFO: PreferredVersion.GroupVersion: scheduling.k8s.io/v1 -Jul 29 16:59:09.583: INFO: Versions found [{scheduling.k8s.io/v1 v1}] -Jul 29 16:59:09.583: INFO: scheduling.k8s.io/v1 matches scheduling.k8s.io/v1 -Jul 29 16:59:09.583: INFO: Checking APIGroup: coordination.k8s.io -Jul 29 16:59:09.585: INFO: PreferredVersion.GroupVersion: coordination.k8s.io/v1 -Jul 29 16:59:09.585: INFO: Versions found [{coordination.k8s.io/v1 v1}] -Jul 29 16:59:09.585: INFO: coordination.k8s.io/v1 matches coordination.k8s.io/v1 -Jul 29 16:59:09.585: INFO: Checking APIGroup: node.k8s.io -Jul 29 16:59:09.587: INFO: PreferredVersion.GroupVersion: node.k8s.io/v1 -Jul 29 16:59:09.587: INFO: Versions found [{node.k8s.io/v1 v1}] -Jul 29 16:59:09.587: INFO: node.k8s.io/v1 matches node.k8s.io/v1 -Jul 29 16:59:09.587: INFO: Checking APIGroup: discovery.k8s.io -Jul 29 16:59:09.589: INFO: PreferredVersion.GroupVersion: discovery.k8s.io/v1 -Jul 29 16:59:09.589: INFO: Versions found [{discovery.k8s.io/v1 v1}] -Jul 29 16:59:09.589: INFO: discovery.k8s.io/v1 matches discovery.k8s.io/v1 -Jul 29 16:59:09.589: INFO: Checking APIGroup: flowcontrol.apiserver.k8s.io -Jul 29 16:59:09.591: INFO: PreferredVersion.GroupVersion: 
flowcontrol.apiserver.k8s.io/v1beta3 -Jul 29 16:59:09.591: INFO: Versions found [{flowcontrol.apiserver.k8s.io/v1beta3 v1beta3} {flowcontrol.apiserver.k8s.io/v1beta2 v1beta2}] -Jul 29 16:59:09.591: INFO: flowcontrol.apiserver.k8s.io/v1beta3 matches flowcontrol.apiserver.k8s.io/v1beta3 -Jul 29 16:59:09.591: INFO: Checking APIGroup: cilium.io -Jul 29 16:59:09.593: INFO: PreferredVersion.GroupVersion: cilium.io/v2 -Jul 29 16:59:09.593: INFO: Versions found [{cilium.io/v2 v2} {cilium.io/v2alpha1 v2alpha1}] -Jul 29 16:59:09.594: INFO: cilium.io/v2 matches cilium.io/v2 -[AfterEach] [sig-api-machinery] Discovery +[It] should validate Replicaset Status endpoints [Conformance] + test/e2e/apps/replica_set.go:176 +STEP: Create a Replicaset 08/24/23 13:03:11.779 +STEP: Verify that the required pods have come up. 08/24/23 13:03:11.807 +Aug 24 13:03:11.816: INFO: Pod name sample-pod: Found 0 pods out of 1 +Aug 24 13:03:16.830: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running 08/24/23 13:03:16.83 +STEP: Getting /status 08/24/23 13:03:16.83 +Aug 24 13:03:16.845: INFO: Replicaset test-rs has Conditions: [] +STEP: updating the Replicaset Status 08/24/23 13:03:16.845 +Aug 24 13:03:16.878: INFO: updatedStatus.Conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the ReplicaSet status to be updated 08/24/23 13:03:16.878 +Aug 24 13:03:16.887: INFO: Observed &ReplicaSet event: ADDED +Aug 24 13:03:16.887: INFO: Observed &ReplicaSet event: MODIFIED +Aug 24 13:03:16.887: INFO: Observed &ReplicaSet event: MODIFIED +Aug 24 13:03:16.889: INFO: Observed &ReplicaSet event: MODIFIED +Aug 24 13:03:16.890: INFO: Found replicaset test-rs in namespace replicaset-8107 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +Aug 24 13:03:16.890: INFO: Replicaset test-rs has an updated status +STEP: patching the Replicaset Status 08/24/23 13:03:16.89 +Aug 24 13:03:16.891: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} +Aug 24 13:03:16.923: INFO: Patched status conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}} +STEP: watching for the Replicaset status to be patched 08/24/23 13:03:16.924 +Aug 24 13:03:16.940: INFO: Observed &ReplicaSet event: ADDED +Aug 24 13:03:16.940: INFO: Observed &ReplicaSet event: MODIFIED +Aug 24 13:03:16.941: INFO: Observed &ReplicaSet event: MODIFIED +Aug 24 13:03:16.941: INFO: Observed &ReplicaSet event: MODIFIED +Aug 24 13:03:16.941: INFO: Observed replicaset test-rs in namespace replicaset-8107 with annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} +Aug 24 13:03:16.942: INFO: Observed &ReplicaSet event: MODIFIED +Aug 24 13:03:16.942: INFO: Found replicaset test-rs in namespace replicaset-8107 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC } +Aug 24 13:03:16.943: INFO: Replicaset test-rs has a patched status +[AfterEach] [sig-apps] ReplicaSet test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:09.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] 
[sig-api-machinery] Discovery +Aug 24 13:03:16.943: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Discovery +[DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Discovery +[DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 -STEP: Destroying namespace "discovery-8829" for this suite. 07/29/23 16:59:09.611 +STEP: Destroying namespace "replicaset-8107" for this suite. 08/24/23 13:03:16.965 ------------------------------ -• [0.759 seconds] -[sig-api-machinery] Discovery -test/e2e/apimachinery/framework.go:23 - should validate PreferredVersion for each APIGroup [Conformance] - test/e2e/apimachinery/discovery.go:122 +• [SLOW TEST] [5.303 seconds] +[sig-apps] ReplicaSet +test/e2e/apps/framework.go:23 + should validate Replicaset Status endpoints [Conformance] + test/e2e/apps/replica_set.go:176 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Discovery + [BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:08.866 - Jul 29 16:59:08.866: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename discovery 07/29/23 16:59:08.878 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:08.917 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:08.92 - [BeforeEach] [sig-api-machinery] Discovery + STEP: Creating a kubernetes client 08/24/23 13:03:11.695 + Aug 24 13:03:11.695: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replicaset 08/24/23 13:03:11.697 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:11.747 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:11.76 + [BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] Discovery - test/e2e/apimachinery/discovery.go:43 - STEP: Setting up server cert 07/29/23 16:59:08.93 - [It] should validate PreferredVersion for each APIGroup [Conformance] - test/e2e/apimachinery/discovery.go:122 - Jul 29 16:59:09.546: INFO: Checking APIGroup: apiregistration.k8s.io - Jul 29 16:59:09.548: INFO: PreferredVersion.GroupVersion: apiregistration.k8s.io/v1 - Jul 29 16:59:09.548: INFO: Versions found [{apiregistration.k8s.io/v1 v1}] - Jul 29 16:59:09.548: INFO: apiregistration.k8s.io/v1 matches apiregistration.k8s.io/v1 - Jul 29 16:59:09.548: INFO: Checking APIGroup: apps - Jul 29 16:59:09.550: INFO: PreferredVersion.GroupVersion: apps/v1 - Jul 29 16:59:09.550: INFO: Versions found [{apps/v1 v1}] - Jul 29 16:59:09.550: INFO: apps/v1 matches apps/v1 - Jul 29 16:59:09.550: INFO: Checking APIGroup: events.k8s.io - Jul 29 16:59:09.551: INFO: PreferredVersion.GroupVersion: events.k8s.io/v1 - Jul 29 16:59:09.551: INFO: Versions found [{events.k8s.io/v1 v1}] - Jul 29 16:59:09.551: INFO: events.k8s.io/v1 matches events.k8s.io/v1 - Jul 29 16:59:09.551: INFO: Checking APIGroup: authentication.k8s.io - Jul 29 16:59:09.553: INFO: PreferredVersion.GroupVersion: authentication.k8s.io/v1 - Jul 29 16:59:09.553: INFO: Versions found [{authentication.k8s.io/v1 v1}] - Jul 29 16:59:09.553: INFO: authentication.k8s.io/v1 matches authentication.k8s.io/v1 - Jul 29 16:59:09.553: 
INFO: Checking APIGroup: authorization.k8s.io - Jul 29 16:59:09.555: INFO: PreferredVersion.GroupVersion: authorization.k8s.io/v1 - Jul 29 16:59:09.555: INFO: Versions found [{authorization.k8s.io/v1 v1}] - Jul 29 16:59:09.555: INFO: authorization.k8s.io/v1 matches authorization.k8s.io/v1 - Jul 29 16:59:09.555: INFO: Checking APIGroup: autoscaling - Jul 29 16:59:09.557: INFO: PreferredVersion.GroupVersion: autoscaling/v2 - Jul 29 16:59:09.557: INFO: Versions found [{autoscaling/v2 v2} {autoscaling/v1 v1}] - Jul 29 16:59:09.557: INFO: autoscaling/v2 matches autoscaling/v2 - Jul 29 16:59:09.557: INFO: Checking APIGroup: batch - Jul 29 16:59:09.558: INFO: PreferredVersion.GroupVersion: batch/v1 - Jul 29 16:59:09.558: INFO: Versions found [{batch/v1 v1}] - Jul 29 16:59:09.558: INFO: batch/v1 matches batch/v1 - Jul 29 16:59:09.558: INFO: Checking APIGroup: certificates.k8s.io - Jul 29 16:59:09.560: INFO: PreferredVersion.GroupVersion: certificates.k8s.io/v1 - Jul 29 16:59:09.560: INFO: Versions found [{certificates.k8s.io/v1 v1}] - Jul 29 16:59:09.560: INFO: certificates.k8s.io/v1 matches certificates.k8s.io/v1 - Jul 29 16:59:09.560: INFO: Checking APIGroup: networking.k8s.io - Jul 29 16:59:09.562: INFO: PreferredVersion.GroupVersion: networking.k8s.io/v1 - Jul 29 16:59:09.562: INFO: Versions found [{networking.k8s.io/v1 v1}] - Jul 29 16:59:09.562: INFO: networking.k8s.io/v1 matches networking.k8s.io/v1 - Jul 29 16:59:09.562: INFO: Checking APIGroup: policy - Jul 29 16:59:09.564: INFO: PreferredVersion.GroupVersion: policy/v1 - Jul 29 16:59:09.564: INFO: Versions found [{policy/v1 v1}] - Jul 29 16:59:09.564: INFO: policy/v1 matches policy/v1 - Jul 29 16:59:09.564: INFO: Checking APIGroup: rbac.authorization.k8s.io - Jul 29 16:59:09.565: INFO: PreferredVersion.GroupVersion: rbac.authorization.k8s.io/v1 - Jul 29 16:59:09.565: INFO: Versions found [{rbac.authorization.k8s.io/v1 v1}] - Jul 29 16:59:09.566: INFO: rbac.authorization.k8s.io/v1 matches rbac.authorization.k8s.io/v1 - Jul 29 16:59:09.568: INFO: Checking APIGroup: storage.k8s.io - Jul 29 16:59:09.573: INFO: PreferredVersion.GroupVersion: storage.k8s.io/v1 - Jul 29 16:59:09.573: INFO: Versions found [{storage.k8s.io/v1 v1} {storage.k8s.io/v1beta1 v1beta1}] - Jul 29 16:59:09.573: INFO: storage.k8s.io/v1 matches storage.k8s.io/v1 - Jul 29 16:59:09.574: INFO: Checking APIGroup: admissionregistration.k8s.io - Jul 29 16:59:09.576: INFO: PreferredVersion.GroupVersion: admissionregistration.k8s.io/v1 - Jul 29 16:59:09.576: INFO: Versions found [{admissionregistration.k8s.io/v1 v1}] - Jul 29 16:59:09.576: INFO: admissionregistration.k8s.io/v1 matches admissionregistration.k8s.io/v1 - Jul 29 16:59:09.577: INFO: Checking APIGroup: apiextensions.k8s.io - Jul 29 16:59:09.579: INFO: PreferredVersion.GroupVersion: apiextensions.k8s.io/v1 - Jul 29 16:59:09.579: INFO: Versions found [{apiextensions.k8s.io/v1 v1}] - Jul 29 16:59:09.579: INFO: apiextensions.k8s.io/v1 matches apiextensions.k8s.io/v1 - Jul 29 16:59:09.579: INFO: Checking APIGroup: scheduling.k8s.io - Jul 29 16:59:09.583: INFO: PreferredVersion.GroupVersion: scheduling.k8s.io/v1 - Jul 29 16:59:09.583: INFO: Versions found [{scheduling.k8s.io/v1 v1}] - Jul 29 16:59:09.583: INFO: scheduling.k8s.io/v1 matches scheduling.k8s.io/v1 - Jul 29 16:59:09.583: INFO: Checking APIGroup: coordination.k8s.io - Jul 29 16:59:09.585: INFO: PreferredVersion.GroupVersion: coordination.k8s.io/v1 - Jul 29 16:59:09.585: INFO: Versions found [{coordination.k8s.io/v1 v1}] - Jul 29 16:59:09.585: INFO: 
coordination.k8s.io/v1 matches coordination.k8s.io/v1 - Jul 29 16:59:09.585: INFO: Checking APIGroup: node.k8s.io - Jul 29 16:59:09.587: INFO: PreferredVersion.GroupVersion: node.k8s.io/v1 - Jul 29 16:59:09.587: INFO: Versions found [{node.k8s.io/v1 v1}] - Jul 29 16:59:09.587: INFO: node.k8s.io/v1 matches node.k8s.io/v1 - Jul 29 16:59:09.587: INFO: Checking APIGroup: discovery.k8s.io - Jul 29 16:59:09.589: INFO: PreferredVersion.GroupVersion: discovery.k8s.io/v1 - Jul 29 16:59:09.589: INFO: Versions found [{discovery.k8s.io/v1 v1}] - Jul 29 16:59:09.589: INFO: discovery.k8s.io/v1 matches discovery.k8s.io/v1 - Jul 29 16:59:09.589: INFO: Checking APIGroup: flowcontrol.apiserver.k8s.io - Jul 29 16:59:09.591: INFO: PreferredVersion.GroupVersion: flowcontrol.apiserver.k8s.io/v1beta3 - Jul 29 16:59:09.591: INFO: Versions found [{flowcontrol.apiserver.k8s.io/v1beta3 v1beta3} {flowcontrol.apiserver.k8s.io/v1beta2 v1beta2}] - Jul 29 16:59:09.591: INFO: flowcontrol.apiserver.k8s.io/v1beta3 matches flowcontrol.apiserver.k8s.io/v1beta3 - Jul 29 16:59:09.591: INFO: Checking APIGroup: cilium.io - Jul 29 16:59:09.593: INFO: PreferredVersion.GroupVersion: cilium.io/v2 - Jul 29 16:59:09.593: INFO: Versions found [{cilium.io/v2 v2} {cilium.io/v2alpha1 v2alpha1}] - Jul 29 16:59:09.594: INFO: cilium.io/v2 matches cilium.io/v2 - [AfterEach] [sig-api-machinery] Discovery + [It] should validate Replicaset Status endpoints [Conformance] + test/e2e/apps/replica_set.go:176 + STEP: Create a Replicaset 08/24/23 13:03:11.779 + STEP: Verify that the required pods have come up. 08/24/23 13:03:11.807 + Aug 24 13:03:11.816: INFO: Pod name sample-pod: Found 0 pods out of 1 + Aug 24 13:03:16.830: INFO: Pod name sample-pod: Found 1 pods out of 1 + STEP: ensuring each pod is running 08/24/23 13:03:16.83 + STEP: Getting /status 08/24/23 13:03:16.83 + Aug 24 13:03:16.845: INFO: Replicaset test-rs has Conditions: [] + STEP: updating the Replicaset Status 08/24/23 13:03:16.845 + Aug 24 13:03:16.878: INFO: updatedStatus.Conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} + STEP: watching for the ReplicaSet status to be updated 08/24/23 13:03:16.878 + Aug 24 13:03:16.887: INFO: Observed &ReplicaSet event: ADDED + Aug 24 13:03:16.887: INFO: Observed &ReplicaSet event: MODIFIED + Aug 24 13:03:16.887: INFO: Observed &ReplicaSet event: MODIFIED + Aug 24 13:03:16.889: INFO: Observed &ReplicaSet event: MODIFIED + Aug 24 13:03:16.890: INFO: Found replicaset test-rs in namespace replicaset-8107 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] + Aug 24 13:03:16.890: INFO: Replicaset test-rs has an updated status + STEP: patching the Replicaset Status 08/24/23 13:03:16.89 + Aug 24 13:03:16.891: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} + Aug 24 13:03:16.923: INFO: Patched status conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}} + STEP: watching for the Replicaset status to be patched 08/24/23 13:03:16.924 + Aug 24 13:03:16.940: INFO: Observed &ReplicaSet event: ADDED + Aug 24 13:03:16.940: INFO: Observed &ReplicaSet event: MODIFIED + Aug 24 13:03:16.941: INFO: Observed &ReplicaSet event: MODIFIED 
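A note for readers replaying the ReplicaSet /status checks in this test outside the e2e harness: both the update and the merge patch target the status subresource, not the main resource. A minimal kubectl sketch, assuming kubectl v1.24 or newer (for the --subresource flag) and reusing the resource name and namespace from the log; the test namespace exists only for the duration of the run, so this is illustrative rather than replayable against the archived cluster:

    # Send the same merge patch the test applies to the status subresource.
    kubectl -n replicaset-8107 patch replicaset test-rs \
      --subresource=status --type=merge \
      -p '{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}'

    # Confirm the condition landed, mirroring the watch the test performs.
    kubectl -n replicaset-8107 get replicaset test-rs \
      -o jsonpath='{.status.conditions}'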
+ Aug 24 13:03:16.941: INFO: Observed &ReplicaSet event: MODIFIED + Aug 24 13:03:16.941: INFO: Observed replicaset test-rs in namespace replicaset-8107 with annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} + Aug 24 13:03:16.942: INFO: Observed &ReplicaSet event: MODIFIED + Aug 24 13:03:16.942: INFO: Found replicaset test-rs in namespace replicaset-8107 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC } + Aug 24 13:03:16.943: INFO: Replicaset test-rs has a patched status + [AfterEach] [sig-apps] ReplicaSet test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:09.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Discovery + Aug 24 13:03:16.943: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Discovery + [DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Discovery + [DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 - STEP: Destroying namespace "discovery-8829" for this suite. 07/29/23 16:59:09.611 + STEP: Destroying namespace "replicaset-8107" for this suite. 08/24/23 13:03:16.965 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] LimitRange - should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] - test/e2e/scheduling/limit_range.go:61 -[BeforeEach] [sig-scheduling] LimitRange +[sig-cli] Kubectl client Kubectl label + should update the label on a resource [Conformance] + test/e2e/kubectl/kubectl.go:1509 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:09.625 -Jul 29 16:59:09.625: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename limitrange 07/29/23 16:59:09.628 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:09.654 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:09.659 -[BeforeEach] [sig-scheduling] LimitRange +STEP: Creating a kubernetes client 08/24/23 13:03:17.01 +Aug 24 13:03:17.010: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 13:03:17.013 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:17.039 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:17.052 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[It] should create a LimitRange with defaults and ensure pod has those defaults applied. 
[Conformance] - test/e2e/scheduling/limit_range.go:61 -STEP: Creating a LimitRange 07/29/23 16:59:09.665 -STEP: Setting up watch 07/29/23 16:59:09.665 -STEP: Submitting a LimitRange 07/29/23 16:59:09.774 -STEP: Verifying LimitRange creation was observed 07/29/23 16:59:09.791 -STEP: Fetching the LimitRange to ensure it has proper values 07/29/23 16:59:09.792 -Jul 29 16:59:09.798: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] -Jul 29 16:59:09.799: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] -STEP: Creating a Pod with no resource requirements 07/29/23 16:59:09.799 -STEP: Ensuring Pod has resource requirements applied from LimitRange 07/29/23 16:59:09.812 -Jul 29 16:59:09.821: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] -Jul 29 16:59:09.821: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] -STEP: Creating a Pod with partial resource requirements 07/29/23 16:59:09.822 -STEP: Ensuring Pod has merged resource requirements applied from LimitRange 07/29/23 16:59:09.836 -Jul 29 16:59:09.842: INFO: Verifying requests: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] -Jul 29 16:59:09.842: INFO: Verifying limits: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] -STEP: Failing to create a Pod with less than min resources 07/29/23 16:59:09.842 -STEP: Failing to create a Pod with more than max resources 07/29/23 16:59:09.846 -STEP: Updating a LimitRange 07/29/23 16:59:09.85 -STEP: Verifying LimitRange updating is effective 07/29/23 16:59:09.863 -STEP: Creating a Pod with less than former min resources 07/29/23 16:59:11.87 -STEP: Failing to create a Pod with more than max resources 07/29/23 16:59:11.885 -STEP: Deleting a LimitRange 07/29/23 16:59:11.889 -STEP: Verifying the LimitRange was deleted 07/29/23 16:59:11.934 -Jul 29 16:59:16.946: INFO: limitRange is already deleted -STEP: Creating a Pod with more than former max resources 07/29/23 16:59:16.946 -[AfterEach] [sig-scheduling] LimitRange +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[BeforeEach] Kubectl label + test/e2e/kubectl/kubectl.go:1494 
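The label lifecycle driven in the steps below reduces to three plain kubectl invocations; a sketch reusing the pod name and namespace from the log (the --kubeconfig path is specific to this run and omitted):

    # Add the label, show it as an extra column, then remove it.
    kubectl -n kubectl-8228 label pod pause testing-label=testing-label-value
    kubectl -n kubectl-8228 get pod pause -L testing-label
    kubectl -n kubectl-8228 label pod pause testing-label-   # trailing '-' removes the label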
+STEP: creating the pod 08/24/23 13:03:17.057 +Aug 24 13:03:17.058: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 create -f -' +Aug 24 13:03:18.502: INFO: stderr: "" +Aug 24 13:03:18.502: INFO: stdout: "pod/pause created\n" +Aug 24 13:03:18.502: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause] +Aug 24 13:03:18.502: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-8228" to be "running and ready" +Aug 24 13:03:18.508: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 6.237806ms +Aug 24 13:03:18.508: INFO: Error evaluating pod condition running and ready: want pod 'pause' on 'pe9deep4seen-3' to be 'Running' but was 'Pending' +Aug 24 13:03:20.519: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.016792907s +Aug 24 13:03:20.519: INFO: Pod "pause" satisfied condition "running and ready" +Aug 24 13:03:20.520: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [pause] +[It] should update the label on a resource [Conformance] + test/e2e/kubectl/kubectl.go:1509 +STEP: adding the label testing-label with value testing-label-value to a pod 08/24/23 13:03:20.52 +Aug 24 13:03:20.521: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 label pods pause testing-label=testing-label-value' +Aug 24 13:03:20.698: INFO: stderr: "" +Aug 24 13:03:20.698: INFO: stdout: "pod/pause labeled\n" +STEP: verifying the pod has the label testing-label with the value testing-label-value 08/24/23 13:03:20.698 +Aug 24 13:03:20.700: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 get pod pause -L testing-label' +Aug 24 13:03:20.871: INFO: stderr: "" +Aug 24 13:03:20.871: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s testing-label-value\n" +STEP: removing the label testing-label of a pod 08/24/23 13:03:20.871 +Aug 24 13:03:20.871: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 label pods pause testing-label-' +Aug 24 13:03:21.045: INFO: stderr: "" +Aug 24 13:03:21.045: INFO: stdout: "pod/pause unlabeled\n" +STEP: verifying the pod doesn't have the label testing-label 08/24/23 13:03:21.045 +Aug 24 13:03:21.046: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 get pod pause -L testing-label' +Aug 24 13:03:21.193: INFO: stderr: "" +Aug 24 13:03:21.193: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 3s \n" +[AfterEach] Kubectl label + test/e2e/kubectl/kubectl.go:1500 +STEP: using delete to clean up resources 08/24/23 13:03:21.194 +Aug 24 13:03:21.194: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 delete --grace-period=0 --force -f -' +Aug 24 13:03:21.336: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Aug 24 13:03:21.336: INFO: stdout: "pod \"pause\" force deleted\n" +Aug 24 13:03:21.336: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 get rc,svc -l name=pause --no-headers' +Aug 24 13:03:21.515: INFO: stderr: "No resources found in kubectl-8228 namespace.\n" +Aug 24 13:03:21.515: INFO: stdout: "" +Aug 24 13:03:21.516: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 get pods -l name=pause -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Aug 24 13:03:21.664: INFO: stderr: "" +Aug 24 13:03:21.664: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:16.964: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-scheduling] LimitRange +Aug 24 13:03:21.664: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-scheduling] LimitRange +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-scheduling] LimitRange +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "limitrange-8465" for this suite. 07/29/23 16:59:16.981 +STEP: Destroying namespace "kubectl-8228" for this suite. 08/24/23 13:03:21.672 ------------------------------ -• [SLOW TEST] [7.371 seconds] -[sig-scheduling] LimitRange -test/e2e/scheduling/framework.go:40 - should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] - test/e2e/scheduling/limit_range.go:61 +• [4.679 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Kubectl label + test/e2e/kubectl/kubectl.go:1492 + should update the label on a resource [Conformance] + test/e2e/kubectl/kubectl.go:1509 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-scheduling] LimitRange + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:09.625 - Jul 29 16:59:09.625: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename limitrange 07/29/23 16:59:09.628 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:09.654 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:09.659 - [BeforeEach] [sig-scheduling] LimitRange + STEP: Creating a kubernetes client 08/24/23 13:03:17.01 + Aug 24 13:03:17.010: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 13:03:17.013 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:17.039 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:17.052 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [It] should create a LimitRange with defaults and ensure pod has those defaults applied. 
[Conformance] - test/e2e/scheduling/limit_range.go:61 - STEP: Creating a LimitRange 07/29/23 16:59:09.665 - STEP: Setting up watch 07/29/23 16:59:09.665 - STEP: Submitting a LimitRange 07/29/23 16:59:09.774 - STEP: Verifying LimitRange creation was observed 07/29/23 16:59:09.791 - STEP: Fetching the LimitRange to ensure it has proper values 07/29/23 16:59:09.792 - Jul 29 16:59:09.798: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] - Jul 29 16:59:09.799: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] - STEP: Creating a Pod with no resource requirements 07/29/23 16:59:09.799 - STEP: Ensuring Pod has resource requirements applied from LimitRange 07/29/23 16:59:09.812 - Jul 29 16:59:09.821: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] - Jul 29 16:59:09.821: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] - STEP: Creating a Pod with partial resource requirements 07/29/23 16:59:09.822 - STEP: Ensuring Pod has merged resource requirements applied from LimitRange 07/29/23 16:59:09.836 - Jul 29 16:59:09.842: INFO: Verifying requests: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] - Jul 29 16:59:09.842: INFO: Verifying limits: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] - STEP: Failing to create a Pod with less than min resources 07/29/23 16:59:09.842 - STEP: Failing to create a Pod with more than max resources 07/29/23 16:59:09.846 - STEP: Updating a LimitRange 07/29/23 16:59:09.85 - STEP: Verifying LimitRange updating is effective 07/29/23 16:59:09.863 - STEP: Creating a Pod with less than former min resources 07/29/23 16:59:11.87 - STEP: Failing to create a Pod with more than max resources 07/29/23 16:59:11.885 - STEP: Deleting a LimitRange 07/29/23 16:59:11.889 - STEP: Verifying the LimitRange was deleted 07/29/23 16:59:11.934 - Jul 29 16:59:16.946: INFO: limitRange is already deleted - STEP: Creating a Pod with more than former max resources 07/29/23 16:59:16.946 - [AfterEach] [sig-scheduling] LimitRange + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [BeforeEach] Kubectl label + 
test/e2e/kubectl/kubectl.go:1494 + STEP: creating the pod 08/24/23 13:03:17.057 + Aug 24 13:03:17.058: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 create -f -' + Aug 24 13:03:18.502: INFO: stderr: "" + Aug 24 13:03:18.502: INFO: stdout: "pod/pause created\n" + Aug 24 13:03:18.502: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause] + Aug 24 13:03:18.502: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-8228" to be "running and ready" + Aug 24 13:03:18.508: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 6.237806ms + Aug 24 13:03:18.508: INFO: Error evaluating pod condition running and ready: want pod 'pause' on 'pe9deep4seen-3' to be 'Running' but was 'Pending' + Aug 24 13:03:20.519: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.016792907s + Aug 24 13:03:20.519: INFO: Pod "pause" satisfied condition "running and ready" + Aug 24 13:03:20.520: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [pause] + [It] should update the label on a resource [Conformance] + test/e2e/kubectl/kubectl.go:1509 + STEP: adding the label testing-label with value testing-label-value to a pod 08/24/23 13:03:20.52 + Aug 24 13:03:20.521: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 label pods pause testing-label=testing-label-value' + Aug 24 13:03:20.698: INFO: stderr: "" + Aug 24 13:03:20.698: INFO: stdout: "pod/pause labeled\n" + STEP: verifying the pod has the label testing-label with the value testing-label-value 08/24/23 13:03:20.698 + Aug 24 13:03:20.700: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 get pod pause -L testing-label' + Aug 24 13:03:20.871: INFO: stderr: "" + Aug 24 13:03:20.871: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s testing-label-value\n" + STEP: removing the label testing-label of a pod 08/24/23 13:03:20.871 + Aug 24 13:03:20.871: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 label pods pause testing-label-' + Aug 24 13:03:21.045: INFO: stderr: "" + Aug 24 13:03:21.045: INFO: stdout: "pod/pause unlabeled\n" + STEP: verifying the pod doesn't have the label testing-label 08/24/23 13:03:21.045 + Aug 24 13:03:21.046: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 get pod pause -L testing-label' + Aug 24 13:03:21.193: INFO: stderr: "" + Aug 24 13:03:21.193: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 3s \n" + [AfterEach] Kubectl label + test/e2e/kubectl/kubectl.go:1500 + STEP: using delete to clean up resources 08/24/23 13:03:21.194 + Aug 24 13:03:21.194: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 delete --grace-period=0 --force -f -' + Aug 24 13:03:21.336: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" + Aug 24 13:03:21.336: INFO: stdout: "pod \"pause\" force deleted\n" + Aug 24 13:03:21.336: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 get rc,svc -l name=pause --no-headers' + Aug 24 13:03:21.515: INFO: stderr: "No resources found in kubectl-8228 namespace.\n" + Aug 24 13:03:21.515: INFO: stdout: "" + Aug 24 13:03:21.516: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-8228 get pods -l name=pause -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' + Aug 24 13:03:21.664: INFO: stderr: "" + Aug 24 13:03:21.664: INFO: stdout: "" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:16.964: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-scheduling] LimitRange + Aug 24 13:03:21.664: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-scheduling] LimitRange + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-scheduling] LimitRange + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "limitrange-8465" for this suite. 07/29/23 16:59:16.981 + STEP: Destroying namespace "kubectl-8228" for this suite. 08/24/23 13:03:21.672 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:74 -[BeforeEach] [sig-storage] Projected configMap +[sig-cli] Kubectl client Kubectl server-side dry-run + should check if kubectl can dry-run update Pods [Conformance] + test/e2e/kubectl/kubectl.go:962 +[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:17.013 -Jul 29 16:59:17.013: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 16:59:17.015 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:17.043 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:17.047 -[BeforeEach] [sig-storage] Projected configMap +STEP: Creating a kubernetes client 08/24/23 13:03:21.693 +Aug 24 13:03:21.693: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubectl 08/24/23 13:03:21.695 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:21.727 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:21.733 +[BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:74 -STEP: Creating configMap with name projected-configmap-test-volume-b51f7da9-6f14-493c-bbd7-b775a2452bc7 07/29/23 16:59:17.051 -STEP: Creating a pod to test consume configMaps 07/29/23 16:59:17.062 -Jul 29 16:59:17.077: INFO: Waiting up to 5m0s for pod 
"pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917" in namespace "projected-4760" to be "Succeeded or Failed" -Jul 29 16:59:17.084: INFO: Pod "pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917": Phase="Pending", Reason="", readiness=false. Elapsed: 7.017728ms -Jul 29 16:59:19.097: INFO: Pod "pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019449301s -Jul 29 16:59:21.096: INFO: Pod "pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018657716s -STEP: Saw pod success 07/29/23 16:59:21.096 -Jul 29 16:59:21.096: INFO: Pod "pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917" satisfied condition "Succeeded or Failed" -Jul 29 16:59:21.107: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917 container agnhost-container: -STEP: delete the pod 07/29/23 16:59:21.143 -Jul 29 16:59:21.166: INFO: Waiting for pod pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917 to disappear -Jul 29 16:59:21.170: INFO: Pod pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917 no longer exists -[AfterEach] [sig-storage] Projected configMap +[BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 +[It] should check if kubectl can dry-run update Pods [Conformance] + test/e2e/kubectl/kubectl.go:962 +STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 08/24/23 13:03:21.738 +Aug 24 13:03:21.738: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5537 run e2e-test-httpd-pod --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod' +Aug 24 13:03:21.911: INFO: stderr: "" +Aug 24 13:03:21.911: INFO: stdout: "pod/e2e-test-httpd-pod created\n" +STEP: replace the image in the pod with server-side dry-run 08/24/23 13:03:21.911 +Aug 24 13:03:21.912: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5537 patch pod e2e-test-httpd-pod -p {"spec":{"containers":[{"name": "e2e-test-httpd-pod","image": "registry.k8s.io/e2e-test-images/busybox:1.29-4"}]}} --dry-run=server' +Aug 24 13:03:22.668: INFO: stderr: "" +Aug 24 13:03:22.668: INFO: stdout: "pod/e2e-test-httpd-pod patched\n" +STEP: verifying the pod e2e-test-httpd-pod has the right image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 08/24/23 13:03:22.668 +Aug 24 13:03:22.798: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5537 delete pods e2e-test-httpd-pod' +Aug 24 13:03:25.021: INFO: stderr: "" +Aug 24 13:03:25.021: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:21.170: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected configMap +Aug 24 13:03:25.022: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 -STEP: Destroying namespace "projected-4760" for this suite. 
07/29/23 16:59:21.176 +STEP: Destroying namespace "kubectl-5537" for this suite. 08/24/23 13:03:25.038 ------------------------------ -• [4.178 seconds] -[sig-storage] Projected configMap -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:74 +• [3.361 seconds] +[sig-cli] Kubectl client +test/e2e/kubectl/framework.go:23 + Kubectl server-side dry-run + test/e2e/kubectl/kubectl.go:956 + should check if kubectl can dry-run update Pods [Conformance] + test/e2e/kubectl/kubectl.go:962 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected configMap + [BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:17.013 - Jul 29 16:59:17.013: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 16:59:17.015 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:17.043 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:17.047 - [BeforeEach] [sig-storage] Projected configMap + STEP: Creating a kubernetes client 08/24/23 13:03:21.693 + Aug 24 13:03:21.693: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubectl 08/24/23 13:03:21.695 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:21.727 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:21.733 + [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:74 - STEP: Creating configMap with name projected-configmap-test-volume-b51f7da9-6f14-493c-bbd7-b775a2452bc7 07/29/23 16:59:17.051 - STEP: Creating a pod to test consume configMaps 07/29/23 16:59:17.062 - Jul 29 16:59:17.077: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917" in namespace "projected-4760" to be "Succeeded or Failed" - Jul 29 16:59:17.084: INFO: Pod "pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917": Phase="Pending", Reason="", readiness=false. Elapsed: 7.017728ms - Jul 29 16:59:19.097: INFO: Pod "pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019449301s - Jul 29 16:59:21.096: INFO: Pod "pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018657716s - STEP: Saw pod success 07/29/23 16:59:21.096 - Jul 29 16:59:21.096: INFO: Pod "pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917" satisfied condition "Succeeded or Failed" - Jul 29 16:59:21.107: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917 container agnhost-container: - STEP: delete the pod 07/29/23 16:59:21.143 - Jul 29 16:59:21.166: INFO: Waiting for pod pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917 to disappear - Jul 29 16:59:21.170: INFO: Pod pod-projected-configmaps-7a56cb7a-1e9a-420c-aec6-62bda3a81917 no longer exists - [AfterEach] [sig-storage] Projected configMap + [BeforeEach] [sig-cli] Kubectl client + test/e2e/kubectl/kubectl.go:274 + [It] should check if kubectl can dry-run update Pods [Conformance] + test/e2e/kubectl/kubectl.go:962 + STEP: running the image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 08/24/23 13:03:21.738 + Aug 24 13:03:21.738: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5537 run e2e-test-httpd-pod --image=registry.k8s.io/e2e-test-images/httpd:2.4.38-4 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod' + Aug 24 13:03:21.911: INFO: stderr: "" + Aug 24 13:03:21.911: INFO: stdout: "pod/e2e-test-httpd-pod created\n" + STEP: replace the image in the pod with server-side dry-run 08/24/23 13:03:21.911 + Aug 24 13:03:21.912: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5537 patch pod e2e-test-httpd-pod -p {"spec":{"containers":[{"name": "e2e-test-httpd-pod","image": "registry.k8s.io/e2e-test-images/busybox:1.29-4"}]}} --dry-run=server' + Aug 24 13:03:22.668: INFO: stderr: "" + Aug 24 13:03:22.668: INFO: stdout: "pod/e2e-test-httpd-pod patched\n" + STEP: verifying the pod e2e-test-httpd-pod has the right image registry.k8s.io/e2e-test-images/httpd:2.4.38-4 08/24/23 13:03:22.668 + Aug 24 13:03:22.798: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=kubectl-5537 delete pods e2e-test-httpd-pod' + Aug 24 13:03:25.021: INFO: stderr: "" + Aug 24 13:03:25.021: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" + [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:21.170: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected configMap + Aug 24 13:03:25.022: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 - STEP: Destroying namespace "projected-4760" for this suite. 07/29/23 16:59:21.176 + STEP: Destroying namespace "kubectl-5537" for this suite. 
08/24/23 13:03:25.038 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Watchers - should observe an object deletion if it stops meeting the requirements of the selector [Conformance] - test/e2e/apimachinery/watch.go:257 -[BeforeEach] [sig-api-machinery] Watchers +[sig-storage] Secrets + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:99 +[BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:21.196 -Jul 29 16:59:21.196: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename watch 07/29/23 16:59:21.198 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:21.229 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:21.233 -[BeforeEach] [sig-api-machinery] Watchers +STEP: Creating a kubernetes client 08/24/23 13:03:25.056 +Aug 24 13:03:25.056: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 13:03:25.06 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:25.093 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:25.097 +[BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 -[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance] - test/e2e/apimachinery/watch.go:257 -STEP: creating a watch on configmaps with a certain label 07/29/23 16:59:21.237 -STEP: creating a new configmap 07/29/23 16:59:21.24 -STEP: modifying the configmap once 07/29/23 16:59:21.248 -STEP: changing the label value of the configmap 07/29/23 16:59:21.261 -STEP: Expecting to observe a delete notification for the watched object 07/29/23 16:59:21.273 -Jul 29 16:59:21.273: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36812 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:21 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:59:21.274: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36813 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:59:21.274: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36814 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: modifying the configmap a 
second time 07/29/23 16:59:21.274 -STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements 07/29/23 16:59:21.285 -STEP: changing the label value of the configmap back 07/29/23 16:59:31.286 -STEP: modifying the configmap a third time 07/29/23 16:59:31.309 -STEP: deleting the configmap 07/29/23 16:59:31.325 -STEP: Expecting to observe an add notification for the watched object when the label value was restored 07/29/23 16:59:31.334 -Jul 29 16:59:31.335: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36871 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:31 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:59:31.335: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36872 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:31 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 16:59:31.336: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36873 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:31 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} -[AfterEach] [sig-api-machinery] Watchers +[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:99 +STEP: Creating secret with name secret-test-68d91b00-e1bd-4433-8a50-1e423d1c1289 08/24/23 13:03:25.174 +STEP: Creating a pod to test consume secrets 08/24/23 13:03:25.192 +Aug 24 13:03:25.220: INFO: Waiting up to 5m0s for pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1" in namespace "secrets-1166" to be "Succeeded or Failed" +Aug 24 13:03:25.228: INFO: Pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1": Phase="Pending", Reason="", readiness=false. Elapsed: 8.402208ms +Aug 24 13:03:27.239: INFO: Pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018980947s +Aug 24 13:03:29.238: INFO: Pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018244836s +STEP: Saw pod success 08/24/23 13:03:29.238 +Aug 24 13:03:29.239: INFO: Pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1" satisfied condition "Succeeded or Failed" +Aug 24 13:03:29.258: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1 container secret-volume-test: +STEP: delete the pod 08/24/23 13:03:29.275 +Aug 24 13:03:29.294: INFO: Waiting for pod pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1 to disappear +Aug 24 13:03:29.301: INFO: Pod pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1 no longer exists +[AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:31.336: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Watchers +Aug 24 13:03:29.301: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "watch-729" for this suite. 07/29/23 16:59:31.351 +STEP: Destroying namespace "secrets-1166" for this suite. 08/24/23 13:03:29.311 +STEP: Destroying namespace "secret-namespace-3908" for this suite. 08/24/23 13:03:29.324 ------------------------------ -• [SLOW TEST] [10.170 seconds] -[sig-api-machinery] Watchers -test/e2e/apimachinery/framework.go:23 - should observe an object deletion if it stops meeting the requirements of the selector [Conformance] - test/e2e/apimachinery/watch.go:257 +• [4.284 seconds] +[sig-storage] Secrets +test/e2e/common/storage/framework.go:23 + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:99 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Watchers + [BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:21.196 - Jul 29 16:59:21.196: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename watch 07/29/23 16:59:21.198 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:21.229 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:21.233 - [BeforeEach] [sig-api-machinery] Watchers + STEP: Creating a kubernetes client 08/24/23 13:03:25.056 + Aug 24 13:03:25.056: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 13:03:25.06 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:25.093 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:25.097 + [BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 - [It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance] - test/e2e/apimachinery/watch.go:257 - STEP: creating a watch on configmaps with a certain label 07/29/23 16:59:21.237 - STEP: creating a new configmap 07/29/23 16:59:21.24 - STEP: modifying the configmap once 07/29/23 16:59:21.248 - STEP: changing the label value of the configmap 07/29/23 16:59:21.261 - STEP: Expecting to 
observe a delete notification for the watched object 07/29/23 16:59:21.273 - Jul 29 16:59:21.273: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36812 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:21 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:59:21.274: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36813 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:59:21.274: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36814 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:21 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} - STEP: modifying the configmap a second time 07/29/23 16:59:21.274 - STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements 07/29/23 16:59:21.285 - STEP: changing the label value of the configmap back 07/29/23 16:59:31.286 - STEP: modifying the configmap a third time 07/29/23 16:59:31.309 - STEP: deleting the configmap 07/29/23 16:59:31.325 - STEP: Expecting to observe an add notification for the watched object when the label value was restored 07/29/23 16:59:31.334 - Jul 29 16:59:31.335: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36871 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:31 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:59:31.335: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36872 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:31 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 16:59:31.336: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-729 55abfb29-0bec-4f29-a04e-099db77d30af 36873 0 2023-07-29 16:59:21 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2023-07-29 16:59:31 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} - [AfterEach] [sig-api-machinery] Watchers + [It] 
should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:99 + STEP: Creating secret with name secret-test-68d91b00-e1bd-4433-8a50-1e423d1c1289 08/24/23 13:03:25.174 + STEP: Creating a pod to test consume secrets 08/24/23 13:03:25.192 + Aug 24 13:03:25.220: INFO: Waiting up to 5m0s for pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1" in namespace "secrets-1166" to be "Succeeded or Failed" + Aug 24 13:03:25.228: INFO: Pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1": Phase="Pending", Reason="", readiness=false. Elapsed: 8.402208ms + Aug 24 13:03:27.239: INFO: Pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018980947s + Aug 24 13:03:29.238: INFO: Pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018244836s + STEP: Saw pod success 08/24/23 13:03:29.238 + Aug 24 13:03:29.239: INFO: Pod "pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1" satisfied condition "Succeeded or Failed" + Aug 24 13:03:29.258: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1 container secret-volume-test: + STEP: delete the pod 08/24/23 13:03:29.275 + Aug 24 13:03:29.294: INFO: Waiting for pod pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1 to disappear + Aug 24 13:03:29.301: INFO: Pod pod-secrets-099ed169-abef-4876-a70c-a06152e9b1d1 no longer exists + [AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:31.336: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Watchers + Aug 24 13:03:29.301: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "watch-729" for this suite. 07/29/23 16:59:31.351 + STEP: Destroying namespace "secrets-1166" for this suite. 08/24/23 13:03:29.311 + STEP: Destroying namespace "secret-namespace-3908" for this suite. 
08/24/23 13:03:29.324 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSS ------------------------------ -[sig-network] EndpointSlice - should have Endpoints and EndpointSlices pointing to API Server [Conformance] - test/e2e/network/endpointslice.go:66 -[BeforeEach] [sig-network] EndpointSlice +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + should be able to convert a non homogeneous list of CRs [Conformance] + test/e2e/apimachinery/crd_conversion_webhook.go:184 +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:31.369 -Jul 29 16:59:31.370: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename endpointslice 07/29/23 16:59:31.373 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:31.413 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:31.418 -[BeforeEach] [sig-network] EndpointSlice +STEP: Creating a kubernetes client 08/24/23 13:03:29.344 +Aug 24 13:03:29.344: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-webhook 08/24/23 13:03:29.346 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:29.373 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:29.381 +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] EndpointSlice - test/e2e/network/endpointslice.go:52 -[It] should have Endpoints and EndpointSlices pointing to API Server [Conformance] - test/e2e/network/endpointslice.go:66 -Jul 29 16:59:31.441: INFO: Endpoints addresses: [192.168.121.120 192.168.121.211] , ports: [6443] -Jul 29 16:59:31.441: INFO: EndpointSlices addresses: [192.168.121.120 192.168.121.211] , ports: [6443] -[AfterEach] [sig-network] EndpointSlice +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/crd_conversion_webhook.go:128 +STEP: Setting up server cert 08/24/23 13:03:29.39 +STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication 08/24/23 13:03:29.803 +STEP: Deploying the custom resource conversion webhook pod 08/24/23 13:03:29.816 +STEP: Wait for the deployment to be ready 08/24/23 13:03:29.835 +Aug 24 13:03:29.849: INFO: new replicaset for deployment "sample-crd-conversion-webhook-deployment" is yet to be created +STEP: Deploying the webhook service 08/24/23 13:03:31.869 +STEP: Verifying the service has paired with the endpoint 08/24/23 13:03:31.889 +Aug 24 13:03:32.890: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 +[It] should be able to convert a non homogeneous list of CRs [Conformance] + test/e2e/apimachinery/crd_conversion_webhook.go:184 +Aug 24 13:03:32.897: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Creating a v1 custom resource 08/24/23 13:03:35.983 +STEP: Create a v2 custom resource 08/24/23 13:03:36.021 +STEP: List CRs in v1 08/24/23 13:03:36.319 +STEP: List CRs in v2 08/24/23 13:03:36.335 +[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:31.442: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready 
-[DeferCleanup (Each)] [sig-network] EndpointSlice +Aug 24 13:03:36.905: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/crd_conversion_webhook.go:139 +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] EndpointSlice +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] EndpointSlice +[DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "endpointslice-1101" for this suite. 07/29/23 16:59:31.452 +STEP: Destroying namespace "crd-webhook-9232" for this suite. 08/24/23 13:03:37.012 ------------------------------ -• [0.095 seconds] -[sig-network] EndpointSlice -test/e2e/network/common/framework.go:23 - should have Endpoints and EndpointSlices pointing to API Server [Conformance] - test/e2e/network/endpointslice.go:66 +• [SLOW TEST] [7.685 seconds] +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should be able to convert a non homogeneous list of CRs [Conformance] + test/e2e/apimachinery/crd_conversion_webhook.go:184 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] EndpointSlice + [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:31.369 - Jul 29 16:59:31.370: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename endpointslice 07/29/23 16:59:31.373 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:31.413 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:31.418 - [BeforeEach] [sig-network] EndpointSlice + STEP: Creating a kubernetes client 08/24/23 13:03:29.344 + Aug 24 13:03:29.344: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-webhook 08/24/23 13:03:29.346 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:29.373 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:29.381 + [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] EndpointSlice - test/e2e/network/endpointslice.go:52 - [It] should have Endpoints and EndpointSlices pointing to API Server [Conformance] - test/e2e/network/endpointslice.go:66 - Jul 29 16:59:31.441: INFO: Endpoints addresses: [192.168.121.120 192.168.121.211] , ports: [6443] - Jul 29 16:59:31.441: INFO: EndpointSlices addresses: [192.168.121.120 192.168.121.211] , ports: [6443] - [AfterEach] [sig-network] EndpointSlice + [BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/crd_conversion_webhook.go:128 + STEP: Setting up server cert 08/24/23 13:03:29.39 + STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication 08/24/23 13:03:29.803 + STEP: Deploying the custom resource conversion webhook pod 08/24/23 13:03:29.816 + STEP: Wait for 
the deployment to be ready 08/24/23 13:03:29.835 + Aug 24 13:03:29.849: INFO: new replicaset for deployment "sample-crd-conversion-webhook-deployment" is yet to be created + STEP: Deploying the webhook service 08/24/23 13:03:31.869 + STEP: Verifying the service has paired with the endpoint 08/24/23 13:03:31.889 + Aug 24 13:03:32.890: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 + [It] should be able to convert a non homogeneous list of CRs [Conformance] + test/e2e/apimachinery/crd_conversion_webhook.go:184 + Aug 24 13:03:32.897: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Creating a v1 custom resource 08/24/23 13:03:35.983 + STEP: Create a v2 custom resource 08/24/23 13:03:36.021 + STEP: List CRs in v1 08/24/23 13:03:36.319 + STEP: List CRs in v2 08/24/23 13:03:36.335 + [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:31.442: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] EndpointSlice + Aug 24 13:03:36.905: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/crd_conversion_webhook.go:139 + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] EndpointSlice + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] EndpointSlice + [DeferCleanup (Each)] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "endpointslice-1101" for this suite. 07/29/23 16:59:31.452 + STEP: Destroying namespace "crd-webhook-9232" for this suite. 
08/24/23 13:03:37.012 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSS +SSSSSSSSS ------------------------------ -[sig-instrumentation] Events API - should delete a collection of events [Conformance] - test/e2e/instrumentation/events.go:207 -[BeforeEach] [sig-instrumentation] Events API +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: udp [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:93 +[BeforeEach] [sig-network] Networking set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:31.466 -Jul 29 16:59:31.466: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename events 07/29/23 16:59:31.47 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:31.491 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:31.496 -[BeforeEach] [sig-instrumentation] Events API +STEP: Creating a kubernetes client 08/24/23 13:03:37.032 +Aug 24 13:03:37.032: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pod-network-test 08/24/23 13:03:37.036 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:37.076 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:37.081 +[BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-instrumentation] Events API - test/e2e/instrumentation/events.go:84 -[It] should delete a collection of events [Conformance] - test/e2e/instrumentation/events.go:207 -STEP: Create set of events 07/29/23 16:59:31.502 -STEP: get a list of Events with a label in the current namespace 07/29/23 16:59:31.532 -STEP: delete a list of events 07/29/23 16:59:31.54 -Jul 29 16:59:31.540: INFO: requesting DeleteCollection of events -STEP: check that the list of events matches the requested quantity 07/29/23 16:59:31.577 -[AfterEach] [sig-instrumentation] Events API +[It] should function for intra-pod communication: udp [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:93 +STEP: Performing setup for networking test in namespace pod-network-test-6847 08/24/23 13:03:37.084 +STEP: creating a selector 08/24/23 13:03:37.085 +STEP: Creating the service pods in kubernetes 08/24/23 13:03:37.085 +Aug 24 13:03:37.085: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Aug 24 13:03:37.197: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "pod-network-test-6847" to be "running and ready" +Aug 24 13:03:37.241: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 43.776356ms +Aug 24 13:03:37.241: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 13:03:39.252: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.055415427s +Aug 24 13:03:39.253: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:41.249: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.051762711s +Aug 24 13:03:41.249: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:43.250: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 6.052898667s +Aug 24 13:03:43.250: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:45.249: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.052230035s +Aug 24 13:03:45.249: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:47.247: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.049612734s +Aug 24 13:03:47.247: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:49.257: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.059818298s +Aug 24 13:03:49.257: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:51.249: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.052019759s +Aug 24 13:03:51.249: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:53.248: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.051470696s +Aug 24 13:03:53.248: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:55.248: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.050785342s +Aug 24 13:03:55.248: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:57.254: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.056996786s +Aug 24 13:03:57.254: INFO: The phase of Pod netserver-0 is Running (Ready = false) +Aug 24 13:03:59.248: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 22.050799722s +Aug 24 13:03:59.248: INFO: The phase of Pod netserver-0 is Running (Ready = true) +Aug 24 13:03:59.248: INFO: Pod "netserver-0" satisfied condition "running and ready" +Aug 24 13:03:59.254: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-6847" to be "running and ready" +Aug 24 13:03:59.261: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 6.489882ms +Aug 24 13:03:59.261: INFO: The phase of Pod netserver-1 is Running (Ready = true) +Aug 24 13:03:59.261: INFO: Pod "netserver-1" satisfied condition "running and ready" +Aug 24 13:03:59.266: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-6847" to be "running and ready" +Aug 24 13:03:59.272: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 5.694566ms +Aug 24 13:03:59.272: INFO: The phase of Pod netserver-2 is Running (Ready = true) +Aug 24 13:03:59.272: INFO: Pod "netserver-2" satisfied condition "running and ready" +STEP: Creating test pods 08/24/23 13:03:59.279 +Aug 24 13:03:59.292: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-6847" to be "running" +Aug 24 13:03:59.298: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 5.753689ms +Aug 24 13:04:01.305: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.012853958s +Aug 24 13:04:01.306: INFO: Pod "test-container-pod" satisfied condition "running" +Aug 24 13:04:01.313: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 +Aug 24 13:04:01.313: INFO: Breadth first check of 10.233.64.116 on host 192.168.121.127... 
+Aug 24 13:04:01.319: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.250:9080/dial?request=hostname&protocol=udp&host=10.233.64.116&port=8081&tries=1'] Namespace:pod-network-test-6847 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 13:04:01.319: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 13:04:01.322: INFO: ExecWithOptions: Clientset creation +Aug 24 13:04:01.322: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-6847/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.250%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.64.116%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) +Aug 24 13:04:01.511: INFO: Waiting for responses: map[] +Aug 24 13:04:01.511: INFO: reached 10.233.64.116 after 0/1 tries +Aug 24 13:04:01.511: INFO: Breadth first check of 10.233.65.245 on host 192.168.121.111... +Aug 24 13:04:01.520: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.250:9080/dial?request=hostname&protocol=udp&host=10.233.65.245&port=8081&tries=1'] Namespace:pod-network-test-6847 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 13:04:01.520: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 13:04:01.524: INFO: ExecWithOptions: Clientset creation +Aug 24 13:04:01.524: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-6847/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.250%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.65.245%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) +Aug 24 13:04:01.659: INFO: Waiting for responses: map[] +Aug 24 13:04:01.659: INFO: reached 10.233.65.245 after 0/1 tries +Aug 24 13:04:01.660: INFO: Breadth first check of 10.233.66.26 on host 192.168.121.130... +Aug 24 13:04:01.667: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.250:9080/dial?request=hostname&protocol=udp&host=10.233.66.26&port=8081&tries=1'] Namespace:pod-network-test-6847 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Aug 24 13:04:01.667: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 13:04:01.669: INFO: ExecWithOptions: Clientset creation +Aug 24 13:04:01.669: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-6847/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.250%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.66.26%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) +Aug 24 13:04:01.797: INFO: Waiting for responses: map[] +Aug 24 13:04:01.797: INFO: reached 10.233.66.26 after 0/1 tries +Aug 24 13:04:01.798: INFO: Going to retry 0 out of 3 pods.... 
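The three ExecWithOptions entries above all drive the same probe: kubectl exec into test-container-pod and ask its webserver container (listening on 9080) to /dial each netserver pod over UDP on port 8081, then report which hostnames answered. A minimal manual equivalent, assuming the kubeconfig path, namespace, pod name, and IPs recorded in this run:

    # Sketch only: re-runs the suite's first breadth-first UDP check by hand.
    # 10.233.66.250 is test-container-pod's IP and 10.233.64.116 the first
    # netserver's IP in this run; substitute your own cluster's values.
    kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=pod-network-test-6847 \
      exec test-container-pod -c webserver -- \
      /bin/sh -c "curl -g -q -s 'http://10.233.66.250:9080/dial?request=hostname&protocol=udp&host=10.233.64.116&port=8081&tries=1'"

A healthy pod network returns a small JSON body naming the netserver that answered; the suite counts such an answer as "reached ... after 0/1 tries" and, as the "Going to retry 0 out of 3 pods" line shows, only retries pods that stay silent.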
+[AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:31.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-instrumentation] Events API +Aug 24 13:04:01.798: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Networking test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-instrumentation] Events API +[DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-instrumentation] Events API +[DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 -STEP: Destroying namespace "events-2305" for this suite. 07/29/23 16:59:31.591 +STEP: Destroying namespace "pod-network-test-6847" for this suite. 08/24/23 13:04:01.807 ------------------------------ -• [0.138 seconds] -[sig-instrumentation] Events API -test/e2e/instrumentation/common/framework.go:23 - should delete a collection of events [Conformance] - test/e2e/instrumentation/events.go:207 +• [SLOW TEST] [24.787 seconds] +[sig-network] Networking +test/e2e/common/network/framework.go:23 + Granular Checks: Pods + test/e2e/common/network/networking.go:32 + should function for intra-pod communication: udp [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:93 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-instrumentation] Events API + [BeforeEach] [sig-network] Networking set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:31.466 - Jul 29 16:59:31.466: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename events 07/29/23 16:59:31.47 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:31.491 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:31.496 - [BeforeEach] [sig-instrumentation] Events API + STEP: Creating a kubernetes client 08/24/23 13:03:37.032 + Aug 24 13:03:37.032: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pod-network-test 08/24/23 13:03:37.036 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:03:37.076 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:03:37.081 + [BeforeEach] [sig-network] Networking test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-instrumentation] Events API - test/e2e/instrumentation/events.go:84 - [It] should delete a collection of events [Conformance] - test/e2e/instrumentation/events.go:207 - STEP: Create set of events 07/29/23 16:59:31.502 - STEP: get a list of Events with a label in the current namespace 07/29/23 16:59:31.532 - STEP: delete a list of events 07/29/23 16:59:31.54 - Jul 29 16:59:31.540: INFO: requesting DeleteCollection of events - STEP: check that the list of events matches the requested quantity 07/29/23 16:59:31.577 - [AfterEach] [sig-instrumentation] Events API + [It] should function for intra-pod communication: udp [NodeConformance] [Conformance] + test/e2e/common/network/networking.go:93 + STEP: Performing setup for networking test in namespace pod-network-test-6847 08/24/23 13:03:37.084 + STEP: creating a selector 08/24/23 13:03:37.085 + STEP: Creating the service pods in kubernetes 08/24/23 13:03:37.085 + Aug 24 13:03:37.085: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable + Aug 24 13:03:37.197: INFO: Waiting up to 5m0s for pod "netserver-0" 
in namespace "pod-network-test-6847" to be "running and ready" + Aug 24 13:03:37.241: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 43.776356ms + Aug 24 13:03:37.241: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 13:03:39.252: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.055415427s + Aug 24 13:03:39.253: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:41.249: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.051762711s + Aug 24 13:03:41.249: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:43.250: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.052898667s + Aug 24 13:03:43.250: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:45.249: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.052230035s + Aug 24 13:03:45.249: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:47.247: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.049612734s + Aug 24 13:03:47.247: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:49.257: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.059818298s + Aug 24 13:03:49.257: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:51.249: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.052019759s + Aug 24 13:03:51.249: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:53.248: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.051470696s + Aug 24 13:03:53.248: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:55.248: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.050785342s + Aug 24 13:03:55.248: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:57.254: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.056996786s + Aug 24 13:03:57.254: INFO: The phase of Pod netserver-0 is Running (Ready = false) + Aug 24 13:03:59.248: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 22.050799722s + Aug 24 13:03:59.248: INFO: The phase of Pod netserver-0 is Running (Ready = true) + Aug 24 13:03:59.248: INFO: Pod "netserver-0" satisfied condition "running and ready" + Aug 24 13:03:59.254: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "pod-network-test-6847" to be "running and ready" + Aug 24 13:03:59.261: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 6.489882ms + Aug 24 13:03:59.261: INFO: The phase of Pod netserver-1 is Running (Ready = true) + Aug 24 13:03:59.261: INFO: Pod "netserver-1" satisfied condition "running and ready" + Aug 24 13:03:59.266: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "pod-network-test-6847" to be "running and ready" + Aug 24 13:03:59.272: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 5.694566ms + Aug 24 13:03:59.272: INFO: The phase of Pod netserver-2 is Running (Ready = true) + Aug 24 13:03:59.272: INFO: Pod "netserver-2" satisfied condition "running and ready" + STEP: Creating test pods 08/24/23 13:03:59.279 + Aug 24 13:03:59.292: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "pod-network-test-6847" to be "running" + Aug 24 13:03:59.298: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 5.753689ms + Aug 24 13:04:01.305: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.012853958s + Aug 24 13:04:01.306: INFO: Pod "test-container-pod" satisfied condition "running" + Aug 24 13:04:01.313: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 + Aug 24 13:04:01.313: INFO: Breadth first check of 10.233.64.116 on host 192.168.121.127... + Aug 24 13:04:01.319: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.250:9080/dial?request=hostname&protocol=udp&host=10.233.64.116&port=8081&tries=1'] Namespace:pod-network-test-6847 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 13:04:01.319: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 13:04:01.322: INFO: ExecWithOptions: Clientset creation + Aug 24 13:04:01.322: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-6847/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.250%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.64.116%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) + Aug 24 13:04:01.511: INFO: Waiting for responses: map[] + Aug 24 13:04:01.511: INFO: reached 10.233.64.116 after 0/1 tries + Aug 24 13:04:01.511: INFO: Breadth first check of 10.233.65.245 on host 192.168.121.111... + Aug 24 13:04:01.520: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.250:9080/dial?request=hostname&protocol=udp&host=10.233.65.245&port=8081&tries=1'] Namespace:pod-network-test-6847 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 13:04:01.520: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 13:04:01.524: INFO: ExecWithOptions: Clientset creation + Aug 24 13:04:01.524: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-6847/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.250%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.65.245%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) + Aug 24 13:04:01.659: INFO: Waiting for responses: map[] + Aug 24 13:04:01.659: INFO: reached 10.233.65.245 after 0/1 tries + Aug 24 13:04:01.660: INFO: Breadth first check of 10.233.66.26 on host 192.168.121.130... 
+ Aug 24 13:04:01.667: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.233.66.250:9080/dial?request=hostname&protocol=udp&host=10.233.66.26&port=8081&tries=1'] Namespace:pod-network-test-6847 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} + Aug 24 13:04:01.667: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 13:04:01.669: INFO: ExecWithOptions: Clientset creation + Aug 24 13:04:01.669: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/pod-network-test-6847/pods/test-container-pod/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+-q+-s+%27http%3A%2F%2F10.233.66.250%3A9080%2Fdial%3Frequest%3Dhostname%26protocol%3Dudp%26host%3D10.233.66.26%26port%3D8081%26tries%3D1%27&container=webserver&container=webserver&stderr=true&stdout=true) + Aug 24 13:04:01.797: INFO: Waiting for responses: map[] + Aug 24 13:04:01.797: INFO: reached 10.233.66.26 after 0/1 tries + Aug 24 13:04:01.798: INFO: Going to retry 0 out of 3 pods.... + [AfterEach] [sig-network] Networking test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:31.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-instrumentation] Events API + Aug 24 13:04:01.798: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Networking test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-instrumentation] Events API + [DeferCleanup (Each)] [sig-network] Networking dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-instrumentation] Events API + [DeferCleanup (Each)] [sig-network] Networking tear down framework | framework.go:193 - STEP: Destroying namespace "events-2305" for this suite. 07/29/23 16:59:31.591 + STEP: Destroying namespace "pod-network-test-6847" for this suite. 
08/24/23 13:04:01.807 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Pods - should support retrieving logs from the container over websockets [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:618 -[BeforeEach] [sig-node] Pods +[sig-apps] ReplicationController + should get and update a ReplicationController scale [Conformance] + test/e2e/apps/rc.go:402 +[BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:31.608 -Jul 29 16:59:31.608: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 16:59:31.612 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:31.647 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:31.654 -[BeforeEach] [sig-node] Pods +STEP: Creating a kubernetes client 08/24/23 13:04:01.824 +Aug 24 13:04:01.824: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replication-controller 08/24/23 13:04:01.827 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:01.861 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:01.868 +[BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:618 -Jul 29 16:59:31.659: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: creating the pod 07/29/23 16:59:31.66 -STEP: submitting the pod to kubernetes 07/29/23 16:59:31.661 -Jul 29 16:59:31.675: INFO: Waiting up to 5m0s for pod "pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9" in namespace "pods-1861" to be "running and ready" -Jul 29 16:59:31.717: INFO: Pod "pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9": Phase="Pending", Reason="", readiness=false. Elapsed: 41.848266ms -Jul 29 16:59:31.717: INFO: The phase of Pod pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 16:59:33.728: INFO: Pod "pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.052966222s -Jul 29 16:59:33.728: INFO: The phase of Pod pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9 is Running (Ready = true) -Jul 29 16:59:33.728: INFO: Pod "pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9" satisfied condition "running and ready" -[AfterEach] [sig-node] Pods +[BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 +[It] should get and update a ReplicationController scale [Conformance] + test/e2e/apps/rc.go:402 +STEP: Creating ReplicationController "e2e-rc-5vx2v" 08/24/23 13:04:01.874 +Aug 24 13:04:01.884: INFO: Get Replication Controller "e2e-rc-5vx2v" to confirm replicas +Aug 24 13:04:02.891: INFO: Get Replication Controller "e2e-rc-5vx2v" to confirm replicas +Aug 24 13:04:02.901: INFO: Found 1 replicas for "e2e-rc-5vx2v" replication controller +STEP: Getting scale subresource for ReplicationController "e2e-rc-5vx2v" 08/24/23 13:04:02.901 +STEP: Updating a scale subresource 08/24/23 13:04:02.907 +STEP: Verifying replicas where modified for replication controller "e2e-rc-5vx2v" 08/24/23 13:04:02.931 +Aug 24 13:04:02.932: INFO: Get Replication Controller "e2e-rc-5vx2v" to confirm replicas +Aug 24 13:04:03.950: INFO: Get Replication Controller "e2e-rc-5vx2v" to confirm replicas +Aug 24 13:04:03.957: INFO: Found 2 replicas for "e2e-rc-5vx2v" replication controller +[AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:33.793: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods +Aug 24 13:04:03.957: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 -STEP: Destroying namespace "pods-1861" for this suite. 07/29/23 16:59:33.805 +STEP: Destroying namespace "replication-controller-9117" for this suite. 
08/24/23 13:04:03.969 ------------------------------ -• [2.215 seconds] -[sig-node] Pods -test/e2e/common/node/framework.go:23 - should support retrieving logs from the container over websockets [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:618 +• [2.163 seconds] +[sig-apps] ReplicationController +test/e2e/apps/framework.go:23 + should get and update a ReplicationController scale [Conformance] + test/e2e/apps/rc.go:402 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods + [BeforeEach] [sig-apps] ReplicationController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:31.608 - Jul 29 16:59:31.608: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 16:59:31.612 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:31.647 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:31.654 - [BeforeEach] [sig-node] Pods + STEP: Creating a kubernetes client 08/24/23 13:04:01.824 + Aug 24 13:04:01.824: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replication-controller 08/24/23 13:04:01.827 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:01.861 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:01.868 + [BeforeEach] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:618 - Jul 29 16:59:31.659: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: creating the pod 07/29/23 16:59:31.66 - STEP: submitting the pod to kubernetes 07/29/23 16:59:31.661 - Jul 29 16:59:31.675: INFO: Waiting up to 5m0s for pod "pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9" in namespace "pods-1861" to be "running and ready" - Jul 29 16:59:31.717: INFO: Pod "pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9": Phase="Pending", Reason="", readiness=false. Elapsed: 41.848266ms - Jul 29 16:59:31.717: INFO: The phase of Pod pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 16:59:33.728: INFO: Pod "pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.052966222s - Jul 29 16:59:33.728: INFO: The phase of Pod pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9 is Running (Ready = true) - Jul 29 16:59:33.728: INFO: Pod "pod-logs-websocket-65aab766-1285-4693-ba14-cb49873945c9" satisfied condition "running and ready" - [AfterEach] [sig-node] Pods + [BeforeEach] [sig-apps] ReplicationController + test/e2e/apps/rc.go:57 + [It] should get and update a ReplicationController scale [Conformance] + test/e2e/apps/rc.go:402 + STEP: Creating ReplicationController "e2e-rc-5vx2v" 08/24/23 13:04:01.874 + Aug 24 13:04:01.884: INFO: Get Replication Controller "e2e-rc-5vx2v" to confirm replicas + Aug 24 13:04:02.891: INFO: Get Replication Controller "e2e-rc-5vx2v" to confirm replicas + Aug 24 13:04:02.901: INFO: Found 1 replicas for "e2e-rc-5vx2v" replication controller + STEP: Getting scale subresource for ReplicationController "e2e-rc-5vx2v" 08/24/23 13:04:02.901 + STEP: Updating a scale subresource 08/24/23 13:04:02.907 + STEP: Verifying replicas where modified for replication controller "e2e-rc-5vx2v" 08/24/23 13:04:02.931 + Aug 24 13:04:02.932: INFO: Get Replication Controller "e2e-rc-5vx2v" to confirm replicas + Aug 24 13:04:03.950: INFO: Get Replication Controller "e2e-rc-5vx2v" to confirm replicas + Aug 24 13:04:03.957: INFO: Found 2 replicas for "e2e-rc-5vx2v" replication controller + [AfterEach] [sig-apps] ReplicationController test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:33.793: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods + Aug 24 13:04:03.957: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicationController test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-apps] ReplicationController dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-apps] ReplicationController tear down framework | framework.go:193 - STEP: Destroying namespace "pods-1861" for this suite. 07/29/23 16:59:33.805 + STEP: Destroying namespace "replication-controller-9117" for this suite. 
08/24/23 13:04:03.969 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Variable Expansion - should fail substituting values in a volume subpath with backticks [Slow] [Conformance] - test/e2e/common/node/expansion.go:152 -[BeforeEach] [sig-node] Variable Expansion +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should honor timeout [Conformance] + test/e2e/apimachinery/webhook.go:381 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:33.829 -Jul 29 16:59:33.830: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename var-expansion 07/29/23 16:59:33.833 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:33.863 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:33.871 -[BeforeEach] [sig-node] Variable Expansion +STEP: Creating a kubernetes client 08/24/23 13:04:03.989 +Aug 24 13:04:03.989: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 13:04:03.993 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:04.025 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:04.033 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should fail substituting values in a volume subpath with backticks [Slow] [Conformance] - test/e2e/common/node/expansion.go:152 -Jul 29 16:59:33.890: INFO: Waiting up to 2m0s for pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce" in namespace "var-expansion-4356" to be "container 0 failed with reason CreateContainerConfigError" -Jul 29 16:59:33.897: INFO: Pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce": Phase="Pending", Reason="", readiness=false. Elapsed: 7.160178ms -Jul 29 16:59:35.910: INFO: Pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.020017541s -Jul 29 16:59:35.910: INFO: Pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce" satisfied condition "container 0 failed with reason CreateContainerConfigError" -Jul 29 16:59:35.910: INFO: Deleting pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce" in namespace "var-expansion-4356" -Jul 29 16:59:35.929: INFO: Wait up to 5m0s for pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce" to be fully deleted -[AfterEach] [sig-node] Variable Expansion +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 13:04:04.064 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 13:04:05.019 +STEP: Deploying the webhook pod 08/24/23 13:04:05.035 +STEP: Wait for the deployment to be ready 08/24/23 13:04:05.056 +Aug 24 13:04:05.068: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +STEP: Deploying the webhook service 08/24/23 13:04:07.127 +STEP: Verifying the service has paired with the endpoint 08/24/23 13:04:07.165 +Aug 24 13:04:08.166: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should honor timeout [Conformance] + test/e2e/apimachinery/webhook.go:381 +STEP: Setting timeout (1s) shorter than webhook latency (5s) 08/24/23 13:04:08.18 +STEP: Registering slow webhook via the AdmissionRegistration API 08/24/23 13:04:08.18 +STEP: Request fails when timeout (1s) is shorter than slow webhook latency (5s) 08/24/23 13:04:08.232 +STEP: Having no error when timeout is shorter than webhook latency and failure policy is ignore 08/24/23 13:04:09.254 +STEP: Registering slow webhook via the AdmissionRegistration API 08/24/23 13:04:09.255 +STEP: Having no error when timeout is longer than webhook latency 08/24/23 13:04:10.318 +STEP: Registering slow webhook via the AdmissionRegistration API 08/24/23 13:04:10.319 +STEP: Having no error when timeout is empty (defaulted to 10s in v1) 08/24/23 13:04:15.381 +STEP: Registering slow webhook via the AdmissionRegistration API 08/24/23 13:04:15.381 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:37.950: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Variable Expansion +Aug 24 13:04:20.437: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Variable Expansion +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "var-expansion-4356" for this suite. 07/29/23 16:59:37.965 +STEP: Destroying namespace "webhook-2664" for this suite. 08/24/23 13:04:20.583 +STEP: Destroying namespace "webhook-2664-markers" for this suite. 
08/24/23 13:04:20.606 ------------------------------ -• [4.155 seconds] -[sig-node] Variable Expansion -test/e2e/common/node/framework.go:23 - should fail substituting values in a volume subpath with backticks [Slow] [Conformance] - test/e2e/common/node/expansion.go:152 +• [SLOW TEST] [16.655 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should honor timeout [Conformance] + test/e2e/apimachinery/webhook.go:381 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Variable Expansion + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:33.829 - Jul 29 16:59:33.830: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename var-expansion 07/29/23 16:59:33.833 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:33.863 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:33.871 - [BeforeEach] [sig-node] Variable Expansion + STEP: Creating a kubernetes client 08/24/23 13:04:03.989 + Aug 24 13:04:03.989: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 13:04:03.993 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:04.025 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:04.033 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should fail substituting values in a volume subpath with backticks [Slow] [Conformance] - test/e2e/common/node/expansion.go:152 - Jul 29 16:59:33.890: INFO: Waiting up to 2m0s for pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce" in namespace "var-expansion-4356" to be "container 0 failed with reason CreateContainerConfigError" - Jul 29 16:59:33.897: INFO: Pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce": Phase="Pending", Reason="", readiness=false. Elapsed: 7.160178ms - Jul 29 16:59:35.910: INFO: Pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.020017541s - Jul 29 16:59:35.910: INFO: Pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce" satisfied condition "container 0 failed with reason CreateContainerConfigError" - Jul 29 16:59:35.910: INFO: Deleting pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce" in namespace "var-expansion-4356" - Jul 29 16:59:35.929: INFO: Wait up to 5m0s for pod "var-expansion-f30ece62-0b18-428a-909e-ad9ee74abdce" to be fully deleted - [AfterEach] [sig-node] Variable Expansion + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 13:04:04.064 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 13:04:05.019 + STEP: Deploying the webhook pod 08/24/23 13:04:05.035 + STEP: Wait for the deployment to be ready 08/24/23 13:04:05.056 + Aug 24 13:04:05.068: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created + STEP: Deploying the webhook service 08/24/23 13:04:07.127 + STEP: Verifying the service has paired with the endpoint 08/24/23 13:04:07.165 + Aug 24 13:04:08.166: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should honor timeout [Conformance] + test/e2e/apimachinery/webhook.go:381 + STEP: Setting timeout (1s) shorter than webhook latency (5s) 08/24/23 13:04:08.18 + STEP: Registering slow webhook via the AdmissionRegistration API 08/24/23 13:04:08.18 + STEP: Request fails when timeout (1s) is shorter than slow webhook latency (5s) 08/24/23 13:04:08.232 + STEP: Having no error when timeout is shorter than webhook latency and failure policy is ignore 08/24/23 13:04:09.254 + STEP: Registering slow webhook via the AdmissionRegistration API 08/24/23 13:04:09.255 + STEP: Having no error when timeout is longer than webhook latency 08/24/23 13:04:10.318 + STEP: Registering slow webhook via the AdmissionRegistration API 08/24/23 13:04:10.319 + STEP: Having no error when timeout is empty (defaulted to 10s in v1) 08/24/23 13:04:15.381 + STEP: Registering slow webhook via the AdmissionRegistration API 08/24/23 13:04:15.381 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:37.950: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Variable Expansion + Aug 24 13:04:20.437: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Variable Expansion + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "var-expansion-4356" for this suite. 07/29/23 16:59:37.965 + STEP: Destroying namespace "webhook-2664" for this suite. 08/24/23 13:04:20.583 + STEP: Destroying namespace "webhook-2664-markers" for this suite. 
08/24/23 13:04:20.606 << End Captured GinkgoWriter Output ------------------------------ -[sig-apps] ReplicationController - should test the lifecycle of a ReplicationController [Conformance] - test/e2e/apps/rc.go:110 -[BeforeEach] [sig-apps] ReplicationController +SS +------------------------------ +[sig-api-machinery] Namespaces [Serial] + should patch a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:268 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:37.989 -Jul 29 16:59:37.990: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replication-controller 07/29/23 16:59:37.995 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:38.02 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:38.025 -[BeforeEach] [sig-apps] ReplicationController +STEP: Creating a kubernetes client 08/24/23 13:04:20.648 +Aug 24 13:04:20.648: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename namespaces 08/24/23 13:04:20.659 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:20.706 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:20.711 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 -[It] should test the lifecycle of a ReplicationController [Conformance] - test/e2e/apps/rc.go:110 -STEP: creating a ReplicationController 07/29/23 16:59:38.033 -STEP: waiting for RC to be added 07/29/23 16:59:38.044 -STEP: waiting for available Replicas 07/29/23 16:59:38.044 -STEP: patching ReplicationController 07/29/23 16:59:40.058 -STEP: waiting for RC to be modified 07/29/23 16:59:40.081 -STEP: patching ReplicationController status 07/29/23 16:59:40.082 -STEP: waiting for RC to be modified 07/29/23 16:59:40.096 -STEP: waiting for available Replicas 07/29/23 16:59:40.096 -STEP: fetching ReplicationController status 07/29/23 16:59:40.109 -STEP: patching ReplicationController scale 07/29/23 16:59:40.117 -STEP: waiting for RC to be modified 07/29/23 16:59:40.127 -STEP: waiting for ReplicationController's scale to be the max amount 07/29/23 16:59:40.133 -STEP: fetching ReplicationController; ensuring that it's patched 07/29/23 16:59:42.05 -STEP: updating ReplicationController status 07/29/23 16:59:42.058 -STEP: waiting for RC to be modified 07/29/23 16:59:42.073 -STEP: listing all ReplicationControllers 07/29/23 16:59:42.074 -STEP: checking that ReplicationController has expected values 07/29/23 16:59:42.083 -STEP: deleting ReplicationControllers by collection 07/29/23 16:59:42.084 -STEP: waiting for ReplicationController to have a DELETED watchEvent 07/29/23 16:59:42.097 -[AfterEach] [sig-apps] ReplicationController +[It] should patch a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:268 +STEP: creating a Namespace 08/24/23 13:04:20.716 +STEP: patching the Namespace 08/24/23 13:04:20.759 +STEP: get the Namespace and ensuring it has the label 08/24/23 13:04:20.778 +[AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:42.237: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] ReplicationController +Aug 24 13:04:20.788: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready 
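
The namespace-patch check logged above reduces to a single merge patch against the Namespace object followed by a read-back. A minimal client-go sketch of that call, for readers reproducing the check outside the harness; the namespace name, label values, and kubeconfig path are illustrative, not taken from the suite:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; the run above used a generated /tmp/kubeconfig-* file.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Merge patch adding one label, mirroring the "patching the Namespace" step.
	patch := []byte(`{"metadata":{"labels":{"testLabel":"testValue"}}}`)
	ns, err := client.CoreV1().Namespaces().Patch(
		context.TODO(), "nspatchtest-example", types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}

	// The "get the Namespace and ensuring it has the label" step is then a read-back check.
	fmt.Println(ns.Labels["testLabel"])
}
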
+[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] ReplicationController +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] ReplicationController +[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "replication-controller-6643" for this suite. 07/29/23 16:59:42.244 +STEP: Destroying namespace "namespaces-5995" for this suite. 08/24/23 13:04:20.81 +STEP: Destroying namespace "nspatchtest-701b82db-e218-4ff0-ae3c-f9d51ccd2b90-6645" for this suite. 08/24/23 13:04:20.832 ------------------------------ -• [4.264 seconds] -[sig-apps] ReplicationController -test/e2e/apps/framework.go:23 - should test the lifecycle of a ReplicationController [Conformance] - test/e2e/apps/rc.go:110 +• [0.210 seconds] +[sig-api-machinery] Namespaces [Serial] +test/e2e/apimachinery/framework.go:23 + should patch a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:268 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] ReplicationController + [BeforeEach] [sig-api-machinery] Namespaces [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:37.989 - Jul 29 16:59:37.990: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replication-controller 07/29/23 16:59:37.995 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:38.02 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:38.025 - [BeforeEach] [sig-apps] ReplicationController + STEP: Creating a kubernetes client 08/24/23 13:04:20.648 + Aug 24 13:04:20.648: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename namespaces 08/24/23 13:04:20.659 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:20.706 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:20.711 + [BeforeEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 - [It] should test the lifecycle of a ReplicationController [Conformance] - test/e2e/apps/rc.go:110 - STEP: creating a ReplicationController 07/29/23 16:59:38.033 - STEP: waiting for RC to be added 07/29/23 16:59:38.044 - STEP: waiting for available Replicas 07/29/23 16:59:38.044 - STEP: patching ReplicationController 07/29/23 16:59:40.058 - STEP: waiting for RC to be modified 07/29/23 16:59:40.081 - STEP: patching ReplicationController status 07/29/23 16:59:40.082 - STEP: waiting for RC to be modified 07/29/23 16:59:40.096 - STEP: waiting for available Replicas 07/29/23 16:59:40.096 - STEP: fetching ReplicationController status 07/29/23 16:59:40.109 - STEP: patching ReplicationController scale 07/29/23 16:59:40.117 - STEP: waiting for RC to be modified 07/29/23 16:59:40.127 - STEP: waiting for ReplicationController's scale to be the max amount 07/29/23 16:59:40.133 - STEP: fetching ReplicationController; ensuring that it's patched 07/29/23 16:59:42.05 - STEP: updating ReplicationController status 07/29/23 16:59:42.058 - STEP: waiting for RC to be modified 07/29/23 16:59:42.073 - STEP: listing all ReplicationControllers 07/29/23 16:59:42.074 - STEP: checking that ReplicationController has expected values 
07/29/23 16:59:42.083 - STEP: deleting ReplicationControllers by collection 07/29/23 16:59:42.084 - STEP: waiting for ReplicationController to have a DELETED watchEvent 07/29/23 16:59:42.097 - [AfterEach] [sig-apps] ReplicationController + [It] should patch a Namespace [Conformance] + test/e2e/apimachinery/namespace.go:268 + STEP: creating a Namespace 08/24/23 13:04:20.716 + STEP: patching the Namespace 08/24/23 13:04:20.759 + STEP: get the Namespace and ensuring it has the label 08/24/23 13:04:20.778 + [AfterEach] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:42.237: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] ReplicationController + Aug 24 13:04:20.788: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] ReplicationController + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] ReplicationController + [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "replication-controller-6643" for this suite. 07/29/23 16:59:42.244 + STEP: Destroying namespace "namespaces-5995" for this suite. 08/24/23 13:04:20.81 + STEP: Destroying namespace "nspatchtest-701b82db-e218-4ff0-ae3c-f9d51ccd2b90-6645" for this suite. 08/24/23 13:04:20.832 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] - updates the published spec when one version gets renamed [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:391 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a replication controller. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:392 +[BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:42.254 -Jul 29 16:59:42.254: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:59:42.257 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:42.279 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:42.282 -[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 13:04:20.861 +Aug 24 13:04:20.861: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename resourcequota 08/24/23 13:04:20.864 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:20.914 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:20.924 +[BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 -[It] updates the published spec when one version gets renamed [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:391 -STEP: set up a multi version CRD 07/29/23 16:59:42.288 -Jul 29 16:59:42.289: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: rename a version 07/29/23 16:59:48.163 -STEP: check the new version name is served 07/29/23 16:59:48.192 -STEP: check the old version name is removed 07/29/23 16:59:50.581 -STEP: check the other version is not changed 07/29/23 16:59:51.507 -[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[It] should create a ResourceQuota and capture the life of a replication controller. [Conformance] + test/e2e/apimachinery/resource_quota.go:392 +STEP: Counting existing ResourceQuota 08/24/23 13:04:20.931 +STEP: Creating a ResourceQuota 08/24/23 13:04:25.944 +STEP: Ensuring resource quota status is calculated 08/24/23 13:04:25.958 +STEP: Creating a ReplicationController 08/24/23 13:04:27.968 +STEP: Ensuring resource quota status captures replication controller creation 08/24/23 13:04:27.989 +STEP: Deleting a ReplicationController 08/24/23 13:04:29.998 +STEP: Ensuring resource quota status released usage 08/24/23 13:04:30.011 +[AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 -Jul 29 16:59:56.143: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +Aug 24 13:04:32.020: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 -STEP: Destroying namespace "crd-publish-openapi-3461" for this suite. 07/29/23 16:59:56.16 +STEP: Destroying namespace "resourcequota-3637" for this suite. 
08/24/23 13:04:32.03 ------------------------------ -• [SLOW TEST] [13.916 seconds] -[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +• [SLOW TEST] [11.184 seconds] +[sig-api-machinery] ResourceQuota test/e2e/apimachinery/framework.go:23 - updates the published spec when one version gets renamed [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:391 + should create a ResourceQuota and capture the life of a replication controller. [Conformance] + test/e2e/apimachinery/resource_quota.go:392 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [BeforeEach] [sig-api-machinery] ResourceQuota set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:42.254 - Jul 29 16:59:42.254: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename crd-publish-openapi 07/29/23 16:59:42.257 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:42.279 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:42.282 - [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 13:04:20.861 + Aug 24 13:04:20.861: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename resourcequota 08/24/23 13:04:20.864 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:20.914 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:20.924 + [BeforeEach] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:31 - [It] updates the published spec when one version gets renamed [Conformance] - test/e2e/apimachinery/crd_publish_openapi.go:391 - STEP: set up a multi version CRD 07/29/23 16:59:42.288 - Jul 29 16:59:42.289: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: rename a version 07/29/23 16:59:48.163 - STEP: check the new version name is served 07/29/23 16:59:48.192 - STEP: check the old version name is removed 07/29/23 16:59:50.581 - STEP: check the other version is not changed 07/29/23 16:59:51.507 - [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [It] should create a ResourceQuota and capture the life of a replication controller. 
[Conformance] + test/e2e/apimachinery/resource_quota.go:392 + STEP: Counting existing ResourceQuota 08/24/23 13:04:20.931 + STEP: Creating a ResourceQuota 08/24/23 13:04:25.944 + STEP: Ensuring resource quota status is calculated 08/24/23 13:04:25.958 + STEP: Creating a ReplicationController 08/24/23 13:04:27.968 + STEP: Ensuring resource quota status captures replication controller creation 08/24/23 13:04:27.989 + STEP: Deleting a ReplicationController 08/24/23 13:04:29.998 + STEP: Ensuring resource quota status released usage 08/24/23 13:04:30.011 + [AfterEach] [sig-api-machinery] ResourceQuota test/e2e/framework/node/init/init.go:32 - Jul 29 16:59:56.143: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + Aug 24 13:04:32.020: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-api-machinery] ResourceQuota tear down framework | framework.go:193 - STEP: Destroying namespace "crd-publish-openapi-3461" for this suite. 07/29/23 16:59:56.16 + STEP: Destroying namespace "resourcequota-3637" for this suite. 08/24/23 13:04:32.03 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSS ------------------------------ [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should deny crd creation [Conformance] - test/e2e/apimachinery/webhook.go:308 + should mutate pod and apply defaults after mutation [Conformance] + test/e2e/apimachinery/webhook.go:264 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 16:59:56.173 -Jul 29 16:59:56.174: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 16:59:56.176 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:56.217 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:56.223 +STEP: Creating a kubernetes client 08/24/23 13:04:32.047 +Aug 24 13:04:32.047: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 13:04:32.049 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:32.075 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:32.081 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 16:59:56.247 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:59:57.067 -STEP: Deploying the webhook pod 07/29/23 16:59:57.083 -STEP: Wait for the deployment to be ready 07/29/23 16:59:57.105 -Jul 29 16:59:57.116: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created -STEP: Deploying the webhook service 07/29/23 16:59:59.137 -STEP: Verifying the service has paired with 
the endpoint 07/29/23 16:59:59.155 -Jul 29 17:00:00.156: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should deny crd creation [Conformance] - test/e2e/apimachinery/webhook.go:308 -STEP: Registering the crd webhook via the AdmissionRegistration API 07/29/23 17:00:00.162 -STEP: Creating a custom resource definition that should be denied by the webhook 07/29/23 17:00:00.19 -Jul 29 17:00:00.190: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 +STEP: Setting up server cert 08/24/23 13:04:32.114 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 13:04:33.621 +STEP: Deploying the webhook pod 08/24/23 13:04:33.631 +STEP: Wait for the deployment to be ready 08/24/23 13:04:33.656 +Aug 24 13:04:33.670: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +STEP: Deploying the webhook service 08/24/23 13:04:35.687 +STEP: Verifying the service has paired with the endpoint 08/24/23 13:04:35.715 +Aug 24 13:04:36.717: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate pod and apply defaults after mutation [Conformance] + test/e2e/apimachinery/webhook.go:264 +STEP: Registering the mutating pod webhook via the AdmissionRegistration API 08/24/23 13:04:36.728 +STEP: create a pod that should be updated by the webhook 08/24/23 13:04:36.773 [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 17:00:00.223: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 13:04:36.858: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:105 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -32381,43 +32366,42 @@ Jul 29 17:00:00.223: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-5547" for this suite. 07/29/23 17:00:00.336 -STEP: Destroying namespace "webhook-5547-markers" for this suite. 07/29/23 17:00:00.363 +STEP: Destroying namespace "webhook-6354" for this suite. 08/24/23 13:04:37.007 +STEP: Destroying namespace "webhook-6354-markers" for this suite. 
08/24/23 13:04:37.048 ------------------------------ -• [4.208 seconds] +• [SLOW TEST] [5.037 seconds] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/framework.go:23 - should deny crd creation [Conformance] - test/e2e/apimachinery/webhook.go:308 + should mutate pod and apply defaults after mutation [Conformance] + test/e2e/apimachinery/webhook.go:264 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 16:59:56.173 - Jul 29 16:59:56.174: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 16:59:56.176 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 16:59:56.217 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 16:59:56.223 + STEP: Creating a kubernetes client 08/24/23 13:04:32.047 + Aug 24 13:04:32.047: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 13:04:32.049 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:32.075 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:32.081 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 16:59:56.247 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 16:59:57.067 - STEP: Deploying the webhook pod 07/29/23 16:59:57.083 - STEP: Wait for the deployment to be ready 07/29/23 16:59:57.105 - Jul 29 16:59:57.116: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created - STEP: Deploying the webhook service 07/29/23 16:59:59.137 - STEP: Verifying the service has paired with the endpoint 07/29/23 16:59:59.155 - Jul 29 17:00:00.156: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should deny crd creation [Conformance] - test/e2e/apimachinery/webhook.go:308 - STEP: Registering the crd webhook via the AdmissionRegistration API 07/29/23 17:00:00.162 - STEP: Creating a custom resource definition that should be denied by the webhook 07/29/23 17:00:00.19 - Jul 29 17:00:00.190: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 + STEP: Setting up server cert 08/24/23 13:04:32.114 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 13:04:33.621 + STEP: Deploying the webhook pod 08/24/23 13:04:33.631 + STEP: Wait for the deployment to be ready 08/24/23 13:04:33.656 + Aug 24 13:04:33.670: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created + STEP: Deploying the webhook service 08/24/23 13:04:35.687 + STEP: Verifying the service has paired with the endpoint 08/24/23 13:04:35.715 + Aug 24 13:04:36.717: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should mutate pod and apply defaults after mutation [Conformance] + test/e2e/apimachinery/webhook.go:264 + STEP: Registering the mutating pod webhook via the AdmissionRegistration API 08/24/23 13:04:36.728 + STEP: create a pod that should be updated by the webhook 08/24/23 13:04:36.773 [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
test/e2e/framework/node/init/init.go:32 - Jul 29 17:00:00.223: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 13:04:36.858: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:105 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -32426,3711 +32410,3469 @@ test/e2e/apimachinery/framework.go:23 dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-5547" for this suite. 07/29/23 17:00:00.336 - STEP: Destroying namespace "webhook-5547-markers" for this suite. 07/29/23 17:00:00.363 + STEP: Destroying namespace "webhook-6354" for this suite. 08/24/23 13:04:37.007 + STEP: Destroying namespace "webhook-6354-markers" for this suite. 08/24/23 13:04:37.048 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Namespaces [Serial] - should ensure that all pods are removed when a namespace is deleted [Conformance] - test/e2e/apimachinery/namespace.go:243 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +[sig-network] DNS + should provide DNS for services [Conformance] + test/e2e/network/dns.go:137 +[BeforeEach] [sig-network] DNS set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:00:00.386 -Jul 29 17:00:00.387: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename namespaces 07/29/23 17:00:00.389 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:00.419 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:00:00.427 -[BeforeEach] [sig-api-machinery] Namespaces [Serial] +STEP: Creating a kubernetes client 08/24/23 13:04:37.087 +Aug 24 13:04:37.088: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename dns 08/24/23 13:04:37.092 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:37.182 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:37.21 +[BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 -[It] should ensure that all pods are removed when a namespace is deleted [Conformance] - test/e2e/apimachinery/namespace.go:243 -STEP: Creating a test namespace 07/29/23 17:00:00.433 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:00.482 -STEP: Creating a pod in the namespace 07/29/23 17:00:00.487 -STEP: Waiting for the pod to have running status 07/29/23 17:00:00.502 -Jul 29 17:00:00.503: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "nsdeletetest-2259" to be "running" -Jul 29 17:00:00.509: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 5.993205ms -Jul 29 17:00:02.518: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.015858084s -Jul 29 17:00:02.519: INFO: Pod "test-pod" satisfied condition "running" -STEP: Deleting the namespace 07/29/23 17:00:02.519 -STEP: Waiting for the namespace to be removed. 
07/29/23 17:00:02.53 -STEP: Recreating the namespace 07/29/23 17:00:13.538 -STEP: Verifying there are no pods in the namespace 07/29/23 17:00:13.571 -[AfterEach] [sig-api-machinery] Namespaces [Serial] +[It] should provide DNS for services [Conformance] + test/e2e/network/dns.go:137 +STEP: Creating a test headless service 08/24/23 13:04:37.223 +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-7140.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-7140.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search 77.2.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.2.77_udp@PTR;check="$$(dig +tcp +noall +answer +search 77.2.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.2.77_tcp@PTR;sleep 1; done + 08/24/23 13:04:37.276 +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-7140.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-7140.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search 77.2.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.2.77_udp@PTR;check="$$(dig +tcp +noall +answer +search 77.2.233.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.233.2.77_tcp@PTR;sleep 1; done + 08/24/23 13:04:37.276 +STEP: creating a pod to probe DNS 08/24/23 13:04:37.276 +STEP: submitting the pod to kubernetes 08/24/23 13:04:37.276 +Aug 24 13:04:37.302: INFO: Waiting up to 15m0s for pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca" in namespace "dns-7140" to be "running" +Aug 24 13:04:37.309: INFO: Pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca": Phase="Pending", Reason="", readiness=false. Elapsed: 7.337895ms +Aug 24 13:04:39.329: INFO: Pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca": Phase="Pending", Reason="", readiness=false. Elapsed: 2.026880179s +Aug 24 13:04:41.319: INFO: Pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca": Phase="Running", Reason="", readiness=true. Elapsed: 4.017151349s +Aug 24 13:04:41.319: INFO: Pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca" satisfied condition "running" +STEP: retrieving the pod 08/24/23 13:04:41.319 +STEP: looking for the results for each expected name from probers 08/24/23 13:04:41.326 +Aug 24 13:04:41.336: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:41.345: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:41.359: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:41.392: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:41.400: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:41.408: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:41.458: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local] + +Aug 24 13:04:46.472: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:46.481: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod 
dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:46.529: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:46.534: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:46.566: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + +Aug 24 13:04:51.483: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:51.495: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:51.554: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:51.565: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:51.633: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + +Aug 24 13:04:56.467: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:56.475: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:56.535: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:56.545: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:04:56.598: INFO: Lookups 
using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + +Aug 24 13:05:01.471: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:05:01.481: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:05:01.556: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:05:01.564: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:05:01.624: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + +Aug 24 13:05:06.480: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:05:06.489: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:05:06.545: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:05:06.555: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) +Aug 24 13:05:06.613: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + +Aug 24 13:05:11.663: INFO: DNS probes using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca succeeded + +STEP: deleting the pod 08/24/23 13:05:11.663 +STEP: deleting the test service 08/24/23 13:05:11.744 +STEP: deleting the test headless service 08/24/23 13:05:11.881 +[AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 -Jul 29 17:00:13.576: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready 
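
The dig loops quoted above probe A, SRV, and PTR records for the headless service from inside wheezy/jessie probe pods, writing an OK marker per successful lookup. Assuming in-cluster execution (cluster DNS only answers these names inside the cluster), the same three lookups expressed against Go's standard resolver would look roughly like the sketch below; the service name and pod IP are taken from the log, but this is an illustration of the probe's intent, not the suite's actual implementation:

package main

import (
	"context"
	"fmt"
	"net"
)

func main() {
	ctx := context.TODO()
	r := net.DefaultResolver

	// A record for the headless service, as in the *_udp@dns-test-service probes.
	addrs, err := r.LookupHost(ctx, "dns-test-service.dns-7140.svc.cluster.local")
	fmt.Println("A:", addrs, err)

	// SRV record for the named port, as in the _http._tcp.* probes.
	_, srvs, err := r.LookupSRV(ctx, "http", "tcp", "dns-test-service.dns-7140.svc.cluster.local")
	for _, s := range srvs {
		fmt.Println("SRV:", s.Target, s.Port)
	}
	fmt.Println(err)

	// PTR record: the reverse form of the 77.2.233.10.in-addr.arpa lookup.
	names, err := r.LookupAddr(ctx, "10.233.2.77")
	fmt.Println("PTR:", names, err)
}

The transient "Unable to read ... the server could not find the requested resource" lines above are the harness polling for those OK marker files before the probe pods have produced them, which is why the run still ends with "DNS probes ... succeeded".
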
-[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] +Aug 24 13:05:11.937: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] +[DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] +[DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 -STEP: Destroying namespace "namespaces-3192" for this suite. 07/29/23 17:00:13.585 -STEP: Destroying namespace "nsdeletetest-2259" for this suite. 07/29/23 17:00:13.596 -Jul 29 17:00:13.601: INFO: Namespace nsdeletetest-2259 was already deleted -STEP: Destroying namespace "nsdeletetest-6286" for this suite. 07/29/23 17:00:13.601 +STEP: Destroying namespace "dns-7140" for this suite. 08/24/23 13:05:11.953 ------------------------------ -• [SLOW TEST] [13.225 seconds] -[sig-api-machinery] Namespaces [Serial] -test/e2e/apimachinery/framework.go:23 - should ensure that all pods are removed when a namespace is deleted [Conformance] - test/e2e/apimachinery/namespace.go:243 +• [SLOW TEST] [34.889 seconds] +[sig-network] DNS +test/e2e/network/common/framework.go:23 + should provide DNS for services [Conformance] + test/e2e/network/dns.go:137 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Namespaces [Serial] + [BeforeEach] [sig-network] DNS set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:00:00.386 - Jul 29 17:00:00.387: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename namespaces 07/29/23 17:00:00.389 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:00.419 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:00:00.427 - [BeforeEach] [sig-api-machinery] Namespaces [Serial] + STEP: Creating a kubernetes client 08/24/23 13:04:37.087 + Aug 24 13:04:37.088: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename dns 08/24/23 13:04:37.092 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:04:37.182 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:04:37.21 + [BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 - [It] should ensure that all pods are removed when a namespace is deleted [Conformance] - test/e2e/apimachinery/namespace.go:243 - STEP: Creating a test namespace 07/29/23 17:00:00.433 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:00.482 - STEP: Creating a pod in the namespace 07/29/23 17:00:00.487 - STEP: Waiting for the pod to have running status 07/29/23 17:00:00.502 - Jul 29 17:00:00.503: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "nsdeletetest-2259" to be "running" - Jul 29 17:00:00.509: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 5.993205ms - Jul 29 17:00:02.518: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.015858084s - Jul 29 17:00:02.519: INFO: Pod "test-pod" satisfied condition "running" - STEP: Deleting the namespace 07/29/23 17:00:02.519 - STEP: Waiting for the namespace to be removed. 
07/29/23 17:00:02.53 - STEP: Recreating the namespace 07/29/23 17:00:13.538 - STEP: Verifying there are no pods in the namespace 07/29/23 17:00:13.571 - [AfterEach] [sig-api-machinery] Namespaces [Serial] + [It] should provide DNS for services [Conformance] + test/e2e/network/dns.go:137 + STEP: Creating a test headless service 08/24/23 13:04:37.223 + STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-7140.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-7140.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search 77.2.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.2.77_udp@PTR;check="$$(dig +tcp +noall +answer +search 77.2.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.2.77_tcp@PTR;sleep 1; done + 08/24/23 13:04:37.276 + STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-7140.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-7140.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-7140.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-7140.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-7140.svc.cluster.local;check="$$(dig +notcp +noall +answer +search 77.2.233.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.233.2.77_udp@PTR;check="$$(dig +tcp +noall +answer +search 77.2.233.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.233.2.77_tcp@PTR;sleep 1; done + 08/24/23 13:04:37.276 + STEP: creating a pod to probe DNS 08/24/23 13:04:37.276 + STEP: submitting the pod to kubernetes 08/24/23 13:04:37.276 + Aug 24 13:04:37.302: INFO: Waiting up to 15m0s for pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca" in namespace "dns-7140" to be "running" + Aug 24 13:04:37.309: INFO: Pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca": Phase="Pending", Reason="", readiness=false. Elapsed: 7.337895ms + Aug 24 13:04:39.329: INFO: Pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca": Phase="Pending", Reason="", readiness=false. Elapsed: 2.026880179s + Aug 24 13:04:41.319: INFO: Pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca": Phase="Running", Reason="", readiness=true. Elapsed: 4.017151349s + Aug 24 13:04:41.319: INFO: Pod "dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca" satisfied condition "running" + STEP: retrieving the pod 08/24/23 13:04:41.319 + STEP: looking for the results for each expected name from probers 08/24/23 13:04:41.326 + Aug 24 13:04:41.336: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:41.345: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:41.359: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:41.392: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:41.400: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:41.408: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:41.458: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-7140.svc.cluster.local] + + Aug 24 13:04:46.472: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:46.481: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod 
dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:46.529: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:46.534: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:46.566: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + + Aug 24 13:04:51.483: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:51.495: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:51.554: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:51.565: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:51.633: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + + Aug 24 13:04:56.467: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:56.475: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:56.535: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:56.545: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:04:56.598: 
INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + + Aug 24 13:05:01.471: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:05:01.481: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:05:01.556: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:05:01.564: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:05:01.624: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + + Aug 24 13:05:06.480: INFO: Unable to read wheezy_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:05:06.489: INFO: Unable to read wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:05:06.545: INFO: Unable to read jessie_udp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:05:06.555: INFO: Unable to read jessie_tcp@dns-test-service.dns-7140.svc.cluster.local from pod dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca: the server could not find the requested resource (get pods dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca) + Aug 24 13:05:06.613: INFO: Lookups using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca failed for: [wheezy_udp@dns-test-service.dns-7140.svc.cluster.local wheezy_tcp@dns-test-service.dns-7140.svc.cluster.local jessie_udp@dns-test-service.dns-7140.svc.cluster.local jessie_tcp@dns-test-service.dns-7140.svc.cluster.local] + + Aug 24 13:05:11.663: INFO: DNS probes using dns-7140/dns-test-cce890e7-06ce-4fe4-8dfb-3be1d93e73ca succeeded + + STEP: deleting the pod 08/24/23 13:05:11.663 + STEP: deleting the test service 08/24/23 13:05:11.744 + STEP: deleting the test headless service 08/24/23 13:05:11.881 + [AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 - Jul 29 17:00:13.576: INFO: Waiting up to 3m0s for all (but 
0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + Aug 24 13:05:11.937: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + [DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Namespaces [Serial] + [DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 - STEP: Destroying namespace "namespaces-3192" for this suite. 07/29/23 17:00:13.585 - STEP: Destroying namespace "nsdeletetest-2259" for this suite. 07/29/23 17:00:13.596 - Jul 29 17:00:13.601: INFO: Namespace nsdeletetest-2259 was already deleted - STEP: Destroying namespace "nsdeletetest-6286" for this suite. 07/29/23 17:00:13.601 + STEP: Destroying namespace "dns-7140" for this suite. 08/24/23 13:05:11.953 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSS +SSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should test the lifecycle of an Endpoint [Conformance] - test/e2e/network/service.go:3244 -[BeforeEach] [sig-network] Services +[sig-apps] DisruptionController + should update/patch PodDisruptionBudget status [Conformance] + test/e2e/apps/disruption.go:164 +[BeforeEach] [sig-apps] DisruptionController set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:00:13.615 -Jul 29 17:00:13.615: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 17:00:13.62 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:13.66 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:00:13.667 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 13:05:11.986 +Aug 24 13:05:11.986: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename disruption 08/24/23 13:05:11.995 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:05:12.044 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:05:12.055 +[BeforeEach] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should test the lifecycle of an Endpoint [Conformance] - test/e2e/network/service.go:3244 -STEP: creating an Endpoint 07/29/23 17:00:13.681 -STEP: waiting for available Endpoint 07/29/23 17:00:13.69 -STEP: listing all Endpoints 07/29/23 17:00:13.693 -STEP: updating the Endpoint 07/29/23 17:00:13.698 -STEP: fetching the Endpoint 07/29/23 17:00:13.709 -STEP: patching the Endpoint 07/29/23 17:00:13.716 -STEP: fetching the Endpoint 07/29/23 17:00:13.732 -STEP: deleting the Endpoint by Collection 07/29/23 17:00:13.738 -STEP: waiting for Endpoint deletion 07/29/23 17:00:13.751 -STEP: fetching the Endpoint 07/29/23 17:00:13.753 -[AfterEach] [sig-network] Services +[BeforeEach] [sig-apps] DisruptionController + test/e2e/apps/disruption.go:72 +[It] should update/patch PodDisruptionBudget status [Conformance] + test/e2e/apps/disruption.go:164 +STEP: Waiting for the pdb to be processed 08/24/23 13:05:12.083 +STEP: Updating PodDisruptionBudget status 08/24/23 13:05:14.107 +STEP: Waiting for all pods to be running 08/24/23 13:05:14.122 +Aug 24 13:05:14.135: INFO: running pods: 0 < 1 +STEP: locating a running 
pod 08/24/23 13:05:16.144 +STEP: Waiting for the pdb to be processed 08/24/23 13:05:16.166 +STEP: Patching PodDisruptionBudget status 08/24/23 13:05:16.19 +STEP: Waiting for the pdb to be processed 08/24/23 13:05:16.211 +[AfterEach] [sig-apps] DisruptionController test/e2e/framework/node/init/init.go:32 -Jul 29 17:00:13.760: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 13:05:16.217: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-apps] DisruptionController dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-apps] DisruptionController tear down framework | framework.go:193 -STEP: Destroying namespace "services-111" for this suite. 07/29/23 17:00:13.766 +STEP: Destroying namespace "disruption-5159" for this suite. 08/24/23 13:05:16.227 ------------------------------ -• [0.163 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should test the lifecycle of an Endpoint [Conformance] - test/e2e/network/service.go:3244 +• [4.253 seconds] +[sig-apps] DisruptionController +test/e2e/apps/framework.go:23 + should update/patch PodDisruptionBudget status [Conformance] + test/e2e/apps/disruption.go:164 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-apps] DisruptionController set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:00:13.615 - Jul 29 17:00:13.615: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 17:00:13.62 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:13.66 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:00:13.667 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 13:05:11.986 + Aug 24 13:05:11.986: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename disruption 08/24/23 13:05:11.995 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:05:12.044 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:05:12.055 + [BeforeEach] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should test the lifecycle of an Endpoint [Conformance] - test/e2e/network/service.go:3244 - STEP: creating an Endpoint 07/29/23 17:00:13.681 - STEP: waiting for available Endpoint 07/29/23 17:00:13.69 - STEP: listing all Endpoints 07/29/23 17:00:13.693 - STEP: updating the Endpoint 07/29/23 17:00:13.698 - STEP: fetching the Endpoint 07/29/23 17:00:13.709 - STEP: patching the Endpoint 07/29/23 17:00:13.716 - STEP: fetching the Endpoint 07/29/23 17:00:13.732 - STEP: deleting the Endpoint by Collection 07/29/23 17:00:13.738 - STEP: waiting for Endpoint deletion 07/29/23 17:00:13.751 - STEP: fetching the Endpoint 07/29/23 17:00:13.753 - [AfterEach] [sig-network] Services + [BeforeEach] [sig-apps] DisruptionController + test/e2e/apps/disruption.go:72 + [It] should update/patch PodDisruptionBudget status [Conformance] + test/e2e/apps/disruption.go:164 + STEP: Waiting for the pdb to be processed 08/24/23 13:05:12.083 + STEP: Updating 
PodDisruptionBudget status 08/24/23 13:05:14.107 + STEP: Waiting for all pods to be running 08/24/23 13:05:14.122 + Aug 24 13:05:14.135: INFO: running pods: 0 < 1 + STEP: locating a running pod 08/24/23 13:05:16.144 + STEP: Waiting for the pdb to be processed 08/24/23 13:05:16.166 + STEP: Patching PodDisruptionBudget status 08/24/23 13:05:16.19 + STEP: Waiting for the pdb to be processed 08/24/23 13:05:16.211 + [AfterEach] [sig-apps] DisruptionController test/e2e/framework/node/init/init.go:32 - Jul 29 17:00:13.760: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 13:05:16.217: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] DisruptionController test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-apps] DisruptionController dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-apps] DisruptionController tear down framework | framework.go:193 - STEP: Destroying namespace "services-111" for this suite. 07/29/23 17:00:13.766 + STEP: Destroying namespace "disruption-5159" for this suite. 08/24/23 13:05:16.227 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:423 -[BeforeEach] [sig-storage] ConfigMap +[sig-api-machinery] Watchers + should receive events on concurrent watches in same order [Conformance] + test/e2e/apimachinery/watch.go:334 +[BeforeEach] [sig-api-machinery] Watchers set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:00:13.779 -Jul 29 17:00:13.779: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 17:00:13.783 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:13.81 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:00:13.815 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 13:05:16.25 +Aug 24 13:05:16.250: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename watch 08/24/23 13:05:16.252 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:05:16.284 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:05:16.289 +[BeforeEach] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:423 -STEP: Creating configMap with name configmap-test-volume-0142a165-112e-42a6-a840-6cbb67ba8979 07/29/23 17:00:13.82 -STEP: Creating a pod to test consume configMaps 07/29/23 17:00:13.827 -Jul 29 17:00:13.839: INFO: Waiting up to 5m0s for pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75" in namespace "configmap-5949" to be "Succeeded or Failed" -Jul 29 17:00:13.845: INFO: Pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75": Phase="Pending", Reason="", readiness=false. Elapsed: 5.284993ms -Jul 29 17:00:15.855: INFO: Pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.015266642s -Jul 29 17:00:17.854: INFO: Pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.014594156s -STEP: Saw pod success 07/29/23 17:00:17.854 -Jul 29 17:00:17.854: INFO: Pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75" satisfied condition "Succeeded or Failed" -Jul 29 17:00:17.860: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75 container configmap-volume-test: -STEP: delete the pod 07/29/23 17:00:17.872 -Jul 29 17:00:17.893: INFO: Waiting for pod pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75 to disappear -Jul 29 17:00:17.899: INFO: Pod pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75 no longer exists -[AfterEach] [sig-storage] ConfigMap +[It] should receive events on concurrent watches in same order [Conformance] + test/e2e/apimachinery/watch.go:334 +STEP: getting a starting resourceVersion 08/24/23 13:05:16.295 +STEP: starting a background goroutine to produce watch events 08/24/23 13:05:16.304 +STEP: creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order 08/24/23 13:05:16.304 +[AfterEach] [sig-api-machinery] Watchers test/e2e/framework/node/init/init.go:32 -Jul 29 17:00:17.899: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 13:05:19.093: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-api-machinery] Watchers dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-api-machinery] Watchers tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-5949" for this suite. 07/29/23 17:00:17.909 +STEP: Destroying namespace "watch-6129" for this suite. 
08/24/23 13:05:19.11 ------------------------------ -• [4.140 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:423 +• [2.916 seconds] +[sig-api-machinery] Watchers +test/e2e/apimachinery/framework.go:23 + should receive events on concurrent watches in same order [Conformance] + test/e2e/apimachinery/watch.go:334 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-api-machinery] Watchers set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:00:13.779 - Jul 29 17:00:13.779: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 17:00:13.783 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:13.81 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:00:13.815 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 13:05:16.25 + Aug 24 13:05:16.250: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename watch 08/24/23 13:05:16.252 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:05:16.284 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:05:16.289 + [BeforeEach] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:423 - STEP: Creating configMap with name configmap-test-volume-0142a165-112e-42a6-a840-6cbb67ba8979 07/29/23 17:00:13.82 - STEP: Creating a pod to test consume configMaps 07/29/23 17:00:13.827 - Jul 29 17:00:13.839: INFO: Waiting up to 5m0s for pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75" in namespace "configmap-5949" to be "Succeeded or Failed" - Jul 29 17:00:13.845: INFO: Pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75": Phase="Pending", Reason="", readiness=false. Elapsed: 5.284993ms - Jul 29 17:00:15.855: INFO: Pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015266642s - Jul 29 17:00:17.854: INFO: Pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014594156s - STEP: Saw pod success 07/29/23 17:00:17.854 - Jul 29 17:00:17.854: INFO: Pod "pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75" satisfied condition "Succeeded or Failed" - Jul 29 17:00:17.860: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75 container configmap-volume-test: - STEP: delete the pod 07/29/23 17:00:17.872 - Jul 29 17:00:17.893: INFO: Waiting for pod pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75 to disappear - Jul 29 17:00:17.899: INFO: Pod pod-configmaps-b7eac72c-39c0-4190-8dc7-c84e0a0dae75 no longer exists - [AfterEach] [sig-storage] ConfigMap + [It] should receive events on concurrent watches in same order [Conformance] + test/e2e/apimachinery/watch.go:334 + STEP: getting a starting resourceVersion 08/24/23 13:05:16.295 + STEP: starting a background goroutine to produce watch events 08/24/23 13:05:16.304 + STEP: creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order 08/24/23 13:05:16.304 + [AfterEach] [sig-api-machinery] Watchers test/e2e/framework/node/init/init.go:32 - Jul 29 17:00:17.899: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 13:05:19.093: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-api-machinery] Watchers dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-api-machinery] Watchers tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-5949" for this suite. 07/29/23 17:00:17.909 + STEP: Destroying namespace "watch-6129" for this suite. 08/24/23 13:05:19.11 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints - verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] - test/e2e/scheduling/preemption.go:814 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:00:17.924 -Jul 29 17:00:17.925: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sched-preemption 07/29/23 17:00:17.928 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:17.966 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:00:17.971 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:97 -Jul 29 17:00:17.999: INFO: Waiting up to 1m0s for all nodes to be ready -Jul 29 17:01:18.058: INFO: Waiting for terminating namespaces to be deleted... 
-[BeforeEach] PriorityClass endpoints +[sig-node] Probing container + should have monotonically increasing restart count [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:199 +[BeforeEach] [sig-node] Probing container set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:01:18.065 -Jul 29 17:01:18.065: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sched-preemption-path 07/29/23 17:01:18.069 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:18.103 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:18.11 -[BeforeEach] PriorityClass endpoints +STEP: Creating a kubernetes client 08/24/23 13:05:19.182 +Aug 24 13:05:19.182: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-probe 08/24/23 13:05:19.184 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:05:19.21 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:05:19.215 +[BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] PriorityClass endpoints - test/e2e/scheduling/preemption.go:771 -[It] verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] - test/e2e/scheduling/preemption.go:814 -Jul 29 17:01:18.142: INFO: PriorityClass.scheduling.k8s.io "p1" is invalid: value: Forbidden: may not be changed in an update. -Jul 29 17:01:18.148: INFO: PriorityClass.scheduling.k8s.io "p2" is invalid: value: Forbidden: may not be changed in an update. -[AfterEach] PriorityClass endpoints - test/e2e/framework/node/init/init.go:32 -Jul 29 17:01:18.181: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] PriorityClass endpoints - test/e2e/scheduling/preemption.go:787 -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] +[BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 +[It] should have monotonically increasing restart count [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:199 +STEP: Creating pod liveness-65425213-97d1-4690-99ee-7c74ebbe3964 in namespace container-probe-8132 08/24/23 13:05:19.224 +Aug 24 13:05:19.245: INFO: Waiting up to 5m0s for pod "liveness-65425213-97d1-4690-99ee-7c74ebbe3964" in namespace "container-probe-8132" to be "not pending" +Aug 24 13:05:19.256: INFO: Pod "liveness-65425213-97d1-4690-99ee-7c74ebbe3964": Phase="Pending", Reason="", readiness=false. Elapsed: 10.476536ms +Aug 24 13:05:21.274: INFO: Pod "liveness-65425213-97d1-4690-99ee-7c74ebbe3964": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.028597097s +Aug 24 13:05:21.274: INFO: Pod "liveness-65425213-97d1-4690-99ee-7c74ebbe3964" satisfied condition "not pending" +Aug 24 13:05:21.274: INFO: Started pod liveness-65425213-97d1-4690-99ee-7c74ebbe3964 in namespace container-probe-8132 +STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 13:05:21.274 +Aug 24 13:05:21.283: INFO: Initial restart count of pod liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is 0 +Aug 24 13:05:41.381: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 1 (20.097929364s elapsed) +Aug 24 13:06:01.500: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 2 (40.217119369s elapsed) +Aug 24 13:06:21.582: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 3 (1m0.299208342s elapsed) +Aug 24 13:06:41.679: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 4 (1m20.396356058s elapsed) +Aug 24 13:07:41.949: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 5 (2m20.665536362s elapsed) +STEP: deleting the pod 08/24/23 13:07:41.949 +[AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 -Jul 29 17:01:18.216: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:84 -[DeferCleanup (Each)] PriorityClass endpoints - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] PriorityClass endpoints - dump namespaces | framework.go:196 -[DeferCleanup (Each)] PriorityClass endpoints - tear down framework | framework.go:193 -STEP: Destroying namespace "sched-preemption-path-8810" for this suite. 07/29/23 17:01:18.318 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] +Aug 24 13:07:42.005: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] +[DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] +[DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 -STEP: Destroying namespace "sched-preemption-163" for this suite. 07/29/23 17:01:18.329 +STEP: Destroying namespace "container-probe-8132" for this suite. 
08/24/23 13:07:42.016 ------------------------------ -• [SLOW TEST] [60.418 seconds] -[sig-scheduling] SchedulerPreemption [Serial] -test/e2e/scheduling/framework.go:40 - PriorityClass endpoints - test/e2e/scheduling/preemption.go:764 - verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] - test/e2e/scheduling/preemption.go:814 +• [SLOW TEST] [142.853 seconds] +[sig-node] Probing container +test/e2e/common/node/framework.go:23 + should have monotonically increasing restart count [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:199 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:00:17.924 - Jul 29 17:00:17.925: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sched-preemption 07/29/23 17:00:17.928 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:00:17.966 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:00:17.971 - [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:97 - Jul 29 17:00:17.999: INFO: Waiting up to 1m0s for all nodes to be ready - Jul 29 17:01:18.058: INFO: Waiting for terminating namespaces to be deleted... - [BeforeEach] PriorityClass endpoints + [BeforeEach] [sig-node] Probing container set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:01:18.065 - Jul 29 17:01:18.065: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sched-preemption-path 07/29/23 17:01:18.069 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:18.103 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:18.11 - [BeforeEach] PriorityClass endpoints + STEP: Creating a kubernetes client 08/24/23 13:05:19.182 + Aug 24 13:05:19.182: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-probe 08/24/23 13:05:19.184 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:05:19.21 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:05:19.215 + [BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] PriorityClass endpoints - test/e2e/scheduling/preemption.go:771 - [It] verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] - test/e2e/scheduling/preemption.go:814 - Jul 29 17:01:18.142: INFO: PriorityClass.scheduling.k8s.io "p1" is invalid: value: Forbidden: may not be changed in an update. - Jul 29 17:01:18.148: INFO: PriorityClass.scheduling.k8s.io "p2" is invalid: value: Forbidden: may not be changed in an update. 
- [AfterEach] PriorityClass endpoints - test/e2e/framework/node/init/init.go:32 - Jul 29 17:01:18.181: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] PriorityClass endpoints - test/e2e/scheduling/preemption.go:787 - [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + [BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 + [It] should have monotonically increasing restart count [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:199 + STEP: Creating pod liveness-65425213-97d1-4690-99ee-7c74ebbe3964 in namespace container-probe-8132 08/24/23 13:05:19.224 + Aug 24 13:05:19.245: INFO: Waiting up to 5m0s for pod "liveness-65425213-97d1-4690-99ee-7c74ebbe3964" in namespace "container-probe-8132" to be "not pending" + Aug 24 13:05:19.256: INFO: Pod "liveness-65425213-97d1-4690-99ee-7c74ebbe3964": Phase="Pending", Reason="", readiness=false. Elapsed: 10.476536ms + Aug 24 13:05:21.274: INFO: Pod "liveness-65425213-97d1-4690-99ee-7c74ebbe3964": Phase="Running", Reason="", readiness=true. Elapsed: 2.028597097s + Aug 24 13:05:21.274: INFO: Pod "liveness-65425213-97d1-4690-99ee-7c74ebbe3964" satisfied condition "not pending" + Aug 24 13:05:21.274: INFO: Started pod liveness-65425213-97d1-4690-99ee-7c74ebbe3964 in namespace container-probe-8132 + STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 13:05:21.274 + Aug 24 13:05:21.283: INFO: Initial restart count of pod liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is 0 + Aug 24 13:05:41.381: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 1 (20.097929364s elapsed) + Aug 24 13:06:01.500: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 2 (40.217119369s elapsed) + Aug 24 13:06:21.582: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 3 (1m0.299208342s elapsed) + Aug 24 13:06:41.679: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 4 (1m20.396356058s elapsed) + Aug 24 13:07:41.949: INFO: Restart count of pod container-probe-8132/liveness-65425213-97d1-4690-99ee-7c74ebbe3964 is now 5 (2m20.665536362s elapsed) + STEP: deleting the pod 08/24/23 13:07:41.949 + [AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 - Jul 29 17:01:18.216: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:84 - [DeferCleanup (Each)] PriorityClass endpoints - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] PriorityClass endpoints - dump namespaces | framework.go:196 - [DeferCleanup (Each)] PriorityClass endpoints - tear down framework | framework.go:193 - STEP: Destroying namespace "sched-preemption-path-8810" for this suite. 
07/29/23 17:01:18.318 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + Aug 24 13:07:42.005: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + [DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + [DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 - STEP: Destroying namespace "sched-preemption-163" for this suite. 07/29/23 17:01:18.329 + STEP: Destroying namespace "container-probe-8132" for this suite. 08/24/23 13:07:42.016 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook - should execute poststart http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:167 -[BeforeEach] [sig-node] Container Lifecycle Hook +[sig-api-machinery] Watchers + should be able to start watching from a specific resource version [Conformance] + test/e2e/apimachinery/watch.go:142 +[BeforeEach] [sig-api-machinery] Watchers set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:01:18.349 -Jul 29 17:01:18.349: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-lifecycle-hook 07/29/23 17:01:18.352 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:18.385 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:18.389 -[BeforeEach] [sig-node] Container Lifecycle Hook +STEP: Creating a kubernetes client 08/24/23 13:07:42.043 +Aug 24 13:07:42.043: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename watch 08/24/23 13:07:42.049 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:07:42.075 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:07:42.088 +[BeforeEach] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] when create a pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:77 -STEP: create the container to handle the HTTPGet hook request. 07/29/23 17:01:18.401 -Jul 29 17:01:18.415: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-3287" to be "running and ready" -Jul 29 17:01:18.424: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 9.153844ms -Jul 29 17:01:18.424: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:01:20.432: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017034804s -Jul 29 17:01:20.432: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:01:22.432: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.017161856s -Jul 29 17:01:22.433: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) -Jul 29 17:01:22.433: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" -[It] should execute poststart http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:167 -STEP: create the pod with lifecycle hook 07/29/23 17:01:22.44 -Jul 29 17:01:22.449: INFO: Waiting up to 5m0s for pod "pod-with-poststart-http-hook" in namespace "container-lifecycle-hook-3287" to be "running and ready" -Jul 29 17:01:22.459: INFO: Pod "pod-with-poststart-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 9.393427ms -Jul 29 17:01:22.459: INFO: The phase of Pod pod-with-poststart-http-hook is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:01:24.470: INFO: Pod "pod-with-poststart-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019935358s -Jul 29 17:01:24.470: INFO: The phase of Pod pod-with-poststart-http-hook is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:01:26.467: INFO: Pod "pod-with-poststart-http-hook": Phase="Running", Reason="", readiness=true. Elapsed: 4.017270101s -Jul 29 17:01:26.467: INFO: The phase of Pod pod-with-poststart-http-hook is Running (Ready = true) -Jul 29 17:01:26.467: INFO: Pod "pod-with-poststart-http-hook" satisfied condition "running and ready" -STEP: check poststart hook 07/29/23 17:01:26.472 -STEP: delete the pod with lifecycle hook 07/29/23 17:01:26.503 -Jul 29 17:01:26.514: INFO: Waiting for pod pod-with-poststart-http-hook to disappear -Jul 29 17:01:26.530: INFO: Pod pod-with-poststart-http-hook still exists -Jul 29 17:01:28.530: INFO: Waiting for pod pod-with-poststart-http-hook to disappear -Jul 29 17:01:28.540: INFO: Pod pod-with-poststart-http-hook no longer exists -[AfterEach] [sig-node] Container Lifecycle Hook +[It] should be able to start watching from a specific resource version [Conformance] + test/e2e/apimachinery/watch.go:142 +STEP: creating a new configmap 08/24/23 13:07:42.091 +STEP: modifying the configmap once 08/24/23 13:07:42.103 +STEP: modifying the configmap a second time 08/24/23 13:07:42.113 +STEP: deleting the configmap 08/24/23 13:07:42.123 +STEP: creating a watch on configmaps from the resource version returned by the first update 08/24/23 13:07:42.175 +STEP: Expecting to observe notifications for all changes to the configmap after the first update 08/24/23 13:07:42.178 +Aug 24 13:07:42.178: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-957 0827b2d4-13b6-4958-ae01-fb22c2aa4bc6 35506 0 2023-08-24 13:07:42 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2023-08-24 13:07:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Aug 24 13:07:42.179: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-957 0827b2d4-13b6-4958-ae01-fb22c2aa4bc6 35507 0 2023-08-24 13:07:42 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2023-08-24 13:07:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers 
test/e2e/framework/node/init/init.go:32 -Jul 29 17:01:28.540: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook +Aug 24 13:07:42.179: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook +[DeferCleanup (Each)] [sig-api-machinery] Watchers dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Container Lifecycle Hook +[DeferCleanup (Each)] [sig-api-machinery] Watchers tear down framework | framework.go:193 -STEP: Destroying namespace "container-lifecycle-hook-3287" for this suite. 07/29/23 17:01:28.55 +STEP: Destroying namespace "watch-957" for this suite. 08/24/23 13:07:42.212 ------------------------------ -• [SLOW TEST] [10.213 seconds] -[sig-node] Container Lifecycle Hook -test/e2e/common/node/framework.go:23 - when create a pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:46 - should execute poststart http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:167 +• [0.191 seconds] +[sig-api-machinery] Watchers +test/e2e/apimachinery/framework.go:23 + should be able to start watching from a specific resource version [Conformance] + test/e2e/apimachinery/watch.go:142 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Container Lifecycle Hook + [BeforeEach] [sig-api-machinery] Watchers set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:01:18.349 - Jul 29 17:01:18.349: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-lifecycle-hook 07/29/23 17:01:18.352 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:18.385 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:18.389 - [BeforeEach] [sig-node] Container Lifecycle Hook + STEP: Creating a kubernetes client 08/24/23 13:07:42.043 + Aug 24 13:07:42.043: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename watch 08/24/23 13:07:42.049 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:07:42.075 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:07:42.088 + [BeforeEach] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] when create a pod with lifecycle hook - test/e2e/common/node/lifecycle_hook.go:77 - STEP: create the container to handle the HTTPGet hook request. 07/29/23 17:01:18.401 - Jul 29 17:01:18.415: INFO: Waiting up to 5m0s for pod "pod-handle-http-request" in namespace "container-lifecycle-hook-3287" to be "running and ready" - Jul 29 17:01:18.424: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 9.153844ms - Jul 29 17:01:18.424: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:01:20.432: INFO: Pod "pod-handle-http-request": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017034804s - Jul 29 17:01:20.432: INFO: The phase of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:01:22.432: INFO: Pod "pod-handle-http-request": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.017161856s - Jul 29 17:01:22.433: INFO: The phase of Pod pod-handle-http-request is Running (Ready = true) - Jul 29 17:01:22.433: INFO: Pod "pod-handle-http-request" satisfied condition "running and ready" - [It] should execute poststart http hook properly [NodeConformance] [Conformance] - test/e2e/common/node/lifecycle_hook.go:167 - STEP: create the pod with lifecycle hook 07/29/23 17:01:22.44 - Jul 29 17:01:22.449: INFO: Waiting up to 5m0s for pod "pod-with-poststart-http-hook" in namespace "container-lifecycle-hook-3287" to be "running and ready" - Jul 29 17:01:22.459: INFO: Pod "pod-with-poststart-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 9.393427ms - Jul 29 17:01:22.459: INFO: The phase of Pod pod-with-poststart-http-hook is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:01:24.470: INFO: Pod "pod-with-poststart-http-hook": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019935358s - Jul 29 17:01:24.470: INFO: The phase of Pod pod-with-poststart-http-hook is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:01:26.467: INFO: Pod "pod-with-poststart-http-hook": Phase="Running", Reason="", readiness=true. Elapsed: 4.017270101s - Jul 29 17:01:26.467: INFO: The phase of Pod pod-with-poststart-http-hook is Running (Ready = true) - Jul 29 17:01:26.467: INFO: Pod "pod-with-poststart-http-hook" satisfied condition "running and ready" - STEP: check poststart hook 07/29/23 17:01:26.472 - STEP: delete the pod with lifecycle hook 07/29/23 17:01:26.503 - Jul 29 17:01:26.514: INFO: Waiting for pod pod-with-poststart-http-hook to disappear - Jul 29 17:01:26.530: INFO: Pod pod-with-poststart-http-hook still exists - Jul 29 17:01:28.530: INFO: Waiting for pod pod-with-poststart-http-hook to disappear - Jul 29 17:01:28.540: INFO: Pod pod-with-poststart-http-hook no longer exists - [AfterEach] [sig-node] Container Lifecycle Hook + [It] should be able to start watching from a specific resource version [Conformance] + test/e2e/apimachinery/watch.go:142 + STEP: creating a new configmap 08/24/23 13:07:42.091 + STEP: modifying the configmap once 08/24/23 13:07:42.103 + STEP: modifying the configmap a second time 08/24/23 13:07:42.113 + STEP: deleting the configmap 08/24/23 13:07:42.123 + STEP: creating a watch on configmaps from the resource version returned by the first update 08/24/23 13:07:42.175 + STEP: Expecting to observe notifications for all changes to the configmap after the first update 08/24/23 13:07:42.178 + Aug 24 13:07:42.178: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-957 0827b2d4-13b6-4958-ae01-fb22c2aa4bc6 35506 0 2023-08-24 13:07:42 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2023-08-24 13:07:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} + Aug 24 13:07:42.179: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-957 0827b2d4-13b6-4958-ae01-fb22c2aa4bc6 35507 0 2023-08-24 13:07:42 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2023-08-24 13:07:42 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} + [AfterEach] [sig-api-machinery] Watchers 
test/e2e/framework/node/init/init.go:32 - Jul 29 17:01:28.540: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + Aug 24 13:07:42.179: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Watchers test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + [DeferCleanup (Each)] [sig-api-machinery] Watchers dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Container Lifecycle Hook + [DeferCleanup (Each)] [sig-api-machinery] Watchers tear down framework | framework.go:193 - STEP: Destroying namespace "container-lifecycle-hook-3287" for this suite. 07/29/23 17:01:28.55 + STEP: Destroying namespace "watch-957" for this suite. 08/24/23 13:07:42.212 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] Deployment - deployment should support proportional scaling [Conformance] - test/e2e/apps/deployment.go:160 -[BeforeEach] [sig-apps] Deployment +[sig-node] Probing container + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:152 +[BeforeEach] [sig-node] Probing container set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:01:28.566 -Jul 29 17:01:28.566: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename deployment 07/29/23 17:01:28.569 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:28.598 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:28.602 -[BeforeEach] [sig-apps] Deployment +STEP: Creating a kubernetes client 08/24/23 13:07:42.239 +Aug 24 13:07:42.240: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-probe 08/24/23 13:07:42.241 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:07:42.266 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:07:42.273 +[BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 -[It] deployment should support proportional scaling [Conformance] - test/e2e/apps/deployment.go:160 -Jul 29 17:01:28.606: INFO: Creating deployment "webserver-deployment" -Jul 29 17:01:28.618: INFO: Waiting for observed generation 1 -Jul 29 17:01:30.644: INFO: Waiting for all required pods to come up -Jul 29 17:01:30.653: INFO: Pod name httpd: Found 10 pods out of 10 -STEP: ensuring each pod is running 07/29/23 17:01:30.653 -Jul 29 17:01:30.653: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-c2z2l" in namespace "deployment-6991" to be "running" -Jul 29 17:01:30.654: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-qpwjp" in namespace "deployment-6991" to be "running" -Jul 29 17:01:30.654: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-pkv4c" in namespace "deployment-6991" to be "running" -Jul 29 17:01:30.654: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-r5nm4" in namespace "deployment-6991" to be "running" -Jul 29 17:01:30.655: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-t557d" in namespace "deployment-6991" to be "running" -Jul 29 17:01:30.655: INFO: Waiting 
up to 5m0s for pod "webserver-deployment-7f5969cbc7-266xr" in namespace "deployment-6991" to be "running" -Jul 29 17:01:30.665: INFO: Pod "webserver-deployment-7f5969cbc7-t557d": Phase="Pending", Reason="", readiness=false. Elapsed: 10.095554ms -Jul 29 17:01:30.665: INFO: Pod "webserver-deployment-7f5969cbc7-qpwjp": Phase="Pending", Reason="", readiness=false. Elapsed: 11.178758ms -Jul 29 17:01:30.669: INFO: Pod "webserver-deployment-7f5969cbc7-c2z2l": Phase="Pending", Reason="", readiness=false. Elapsed: 15.425976ms -Jul 29 17:01:30.669: INFO: Pod "webserver-deployment-7f5969cbc7-pkv4c": Phase="Pending", Reason="", readiness=false. Elapsed: 14.973042ms -Jul 29 17:01:30.672: INFO: Pod "webserver-deployment-7f5969cbc7-266xr": Phase="Pending", Reason="", readiness=false. Elapsed: 16.209229ms -Jul 29 17:01:30.672: INFO: Pod "webserver-deployment-7f5969cbc7-r5nm4": Phase="Pending", Reason="", readiness=false. Elapsed: 17.198987ms -Jul 29 17:01:32.674: INFO: Pod "webserver-deployment-7f5969cbc7-t557d": Phase="Running", Reason="", readiness=true. Elapsed: 2.01943308s -Jul 29 17:01:32.674: INFO: Pod "webserver-deployment-7f5969cbc7-t557d" satisfied condition "running" -Jul 29 17:01:32.676: INFO: Pod "webserver-deployment-7f5969cbc7-qpwjp": Phase="Running", Reason="", readiness=true. Elapsed: 2.022441734s -Jul 29 17:01:32.676: INFO: Pod "webserver-deployment-7f5969cbc7-qpwjp" satisfied condition "running" -Jul 29 17:01:32.677: INFO: Pod "webserver-deployment-7f5969cbc7-c2z2l": Phase="Running", Reason="", readiness=true. Elapsed: 2.023566333s -Jul 29 17:01:32.677: INFO: Pod "webserver-deployment-7f5969cbc7-c2z2l" satisfied condition "running" -Jul 29 17:01:32.679: INFO: Pod "webserver-deployment-7f5969cbc7-pkv4c": Phase="Running", Reason="", readiness=true. Elapsed: 2.024869259s -Jul 29 17:01:32.679: INFO: Pod "webserver-deployment-7f5969cbc7-pkv4c" satisfied condition "running" -Jul 29 17:01:32.679: INFO: Pod "webserver-deployment-7f5969cbc7-266xr": Phase="Running", Reason="", readiness=true. Elapsed: 2.02391102s -Jul 29 17:01:32.679: INFO: Pod "webserver-deployment-7f5969cbc7-266xr" satisfied condition "running" -Jul 29 17:01:32.680: INFO: Pod "webserver-deployment-7f5969cbc7-r5nm4": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.025363809s -Jul 29 17:01:32.680: INFO: Pod "webserver-deployment-7f5969cbc7-r5nm4" satisfied condition "running" -Jul 29 17:01:32.680: INFO: Waiting for deployment "webserver-deployment" to complete -Jul 29 17:01:32.694: INFO: Updating deployment "webserver-deployment" with a non-existent image -Jul 29 17:01:32.709: INFO: Updating deployment webserver-deployment -Jul 29 17:01:32.709: INFO: Waiting for observed generation 2 -Jul 29 17:01:34.730: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8 -Jul 29 17:01:34.736: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8 -Jul 29 17:01:34.741: INFO: Waiting for the first rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas -Jul 29 17:01:34.757: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0 -Jul 29 17:01:34.757: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5 -Jul 29 17:01:34.762: INFO: Waiting for the second rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas -Jul 29 17:01:34.772: INFO: Verifying that deployment "webserver-deployment" has minimum required number of available replicas -Jul 29 17:01:34.772: INFO: Scaling up the deployment "webserver-deployment" from 10 to 30 -Jul 29 17:01:34.792: INFO: Updating deployment webserver-deployment -Jul 29 17:01:34.792: INFO: Waiting for the replicasets of deployment "webserver-deployment" to have desired number of replicas -Jul 29 17:01:34.823: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20 -Jul 29 17:01:34.831: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13 -[AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 -Jul 29 17:01:34.854: INFO: Deployment "webserver-deployment": -&Deployment{ObjectMeta:{webserver-deployment deployment-6991 78e2907f-c417-45b3-9b9f-b0ddec758c70 37756 3 2023-07-29 17:01:28 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*30,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: 
httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc005294be8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:13,UpdatedReplicas:5,AvailableReplicas:8,UnavailableReplicas:5,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "webserver-deployment-d9f79cb5" is progressing.,LastUpdateTime:2023-07-29 17:01:32 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,},DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2023-07-29 17:01:34 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,},},ReadyReplicas:8,CollisionCount:nil,},} - -Jul 29 17:01:35.029: INFO: New ReplicaSet "webserver-deployment-d9f79cb5" of Deployment "webserver-deployment": -&ReplicaSet{ObjectMeta:{webserver-deployment-d9f79cb5 deployment-6991 b244bdb0-e80e-4e4d-933c-83826790a971 37752 3 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment webserver-deployment 78e2907f-c417-45b3-9b9f-b0ddec758c70 0xc0052950a7 0xc0052950a8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78e2907f-c417-45b3-9b9f-b0ddec758c70\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*13,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: d9f79cb5,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [] [] []} {[] [] 
[{httpd webserver:404 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc005295148 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:5,FullyLabeledReplicas:5,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Jul 29 17:01:35.029: INFO: All old ReplicaSets of Deployment "webserver-deployment": -Jul 29 17:01:35.029: INFO: &ReplicaSet{ObjectMeta:{webserver-deployment-7f5969cbc7 deployment-6991 92454c3c-aa77-4c87-a41a-9e0a84882af1 37749 3 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment webserver-deployment 78e2907f-c417-45b3-9b9f-b0ddec758c70 0xc005294fb7 0xc005294fb8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78e2907f-c417-45b3-9b9f-b0ddec758c70\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*20,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7f5969cbc7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc005295048 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] 
[]}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:8,FullyLabeledReplicas:8,ObservedGeneration:2,ReadyReplicas:8,AvailableReplicas:8,Conditions:[]ReplicaSetCondition{},},} -Jul 29 17:01:35.231: INFO: Pod "webserver-deployment-7f5969cbc7-266xr" is available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-266xr webserver-deployment-7f5969cbc7- deployment-6991 143b711f-d68c-4f33-991f-59304dc5bf2d 37653 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca137 0xc0053ca138}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:31 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.68\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-g8f7r,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g8f7r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:10.233.64.68,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:31 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://2f26f5299ed47e514d02512bdfb283e3e217e50aa4407bc772ce11b484bf03b2,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.68,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.235: INFO: Pod "webserver-deployment-7f5969cbc7-68ft9" is available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-68ft9 webserver-deployment-7f5969cbc7- deployment-6991 36406430-5b58-45b8-b43d-f20c969d572d 37620 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca327 0xc0053ca328}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:30 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.26\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ddnjd,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ddnjd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:10.233.65.26,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://5dfc8e659dc88527871d83f4cfe63f0888afb19b6d0fc1b6f151080d613170de,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.26,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.237: INFO: Pod "webserver-deployment-7f5969cbc7-992qv" is not available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-992qv webserver-deployment-7f5969cbc7- deployment-6991 8db497e8-a466-4770-8f44-d63c90af8635 37787 0 2023-07-29 17:01:35 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca517 0xc0053ca518}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-j47wv,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j47wv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]Po
dResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.238: INFO: Pod "webserver-deployment-7f5969cbc7-9ddsm" is not available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-9ddsm webserver-deployment-7f5969cbc7- deployment-6991 cbf9831c-9405-4cc5-8b6d-f3944a79841e 37790 0 2023-07-29 17:01:35 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca657 0xc0053ca658}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-6kv2t,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6kv2t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]
VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.239: INFO: Pod "webserver-deployment-7f5969cbc7-bf5jk" is not available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-bf5jk webserver-deployment-7f5969cbc7- deployment-6991 adad2a36-fe17-4129-8f2e-d8a6451686fe 37770 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca7c0 0xc0053ca7c1}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-8knwf,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8knwf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},Resou
rceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.240: INFO: Pod "webserver-deployment-7f5969cbc7-jcj6q" is not available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-jcj6q webserver-deployment-7f5969cbc7- deployment-6991 a6e0b453-0fee-421e-95cd-258b2bc7cbe1 37786 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca920 0xc0053ca921}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-d4hhn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d4hhn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup
:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.240: INFO: Pod "webserver-deployment-7f5969cbc7-jnxqb" is available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-jnxqb webserver-deployment-7f5969cbc7- deployment-6991 424e7684-a0f3-4969-ac3e-89328a3c534b 37613 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053caa80 0xc0053caa81}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:30 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.23\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ddtm9,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ddtm9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Ke
y:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:10.233.65.23,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://4c69f820ec6f8f8bff659744422d0c24bb0cf8e8f0b4b56b8f0a22c782bfefd8,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.23,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.241: INFO: Pod "webserver-deployment-7f5969cbc7-kdn7v" is available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-kdn7v webserver-deployment-7f5969cbc7- deployment-6991 fabff63a-b810-49c4-a1c7-e9a349eea03b 37623 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cac67 0xc0053cac68}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:30 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.40\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-zdgmk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zdgmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Ke
y:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.40,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://6f7654c134c78f14368ef115d053ea0f57684c0ce30a7c566183dd9812a5ae76,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.40,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.242: INFO: Pod "webserver-deployment-7f5969cbc7-pkv4c" is available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-pkv4c webserver-deployment-7f5969cbc7- deployment-6991 e73bc416-ab4b-4f1c-9436-ba9d6dbed13e 37643 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cae57 0xc0053cae58}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:31 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.195\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-2znq9,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2znq9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{K
ey:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:10.233.65.195,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://31171be835515d0c4a1cf6f1c9aac9a76592bc9cd35559ed084aa6b746782c41,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.195,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.242: INFO: Pod "webserver-deployment-7f5969cbc7-pqrb7" is not available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-pqrb7 webserver-deployment-7f5969cbc7- deployment-6991 5fc5d9f6-3cb6-4f02-8b92-a329bb5235ea 37788 0 2023-07-29 17:01:35 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb047 0xc0053cb048}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-vwwgj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vwwgj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]Po
dResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.243: INFO: Pod "webserver-deployment-7f5969cbc7-r5nm4" is available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-r5nm4 webserver-deployment-7f5969cbc7- deployment-6991 8e49b83f-a21e-4b35-8356-9647d1975390 37658 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb187 0xc0053cb188}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:31 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.36\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-gk2lm,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gk2lm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:10.233.64.36,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:31 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://d58f8715456a183221ffccdcc9050450c27eccd9a0ac6330ec67588adbdda037,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.36,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.243: INFO: Pod "webserver-deployment-7f5969cbc7-sgkbw" is not available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-sgkbw webserver-deployment-7f5969cbc7- deployment-6991 3ed7ca7b-84cb-4706-abf0-ce6b74e13e56 37758 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb377 0xc0053cb378}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-9hbjs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9hbjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 17:01:34 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.244: INFO: Pod "webserver-deployment-7f5969cbc7-t557d" is available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-t557d webserver-deployment-7f5969cbc7- deployment-6991 d841169d-c384-49ea-a8c4-47f2996bf88c 37655 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb547 0xc0053cb548}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:31 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.233\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-g4zm4,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g4zm4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:10.233.64.233,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:31 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://4ec031326fa4cfe239e55294a777ace514050b1fe62007ab424b8e715971e732,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.233,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.244: INFO: Pod "webserver-deployment-7f5969cbc7-wkzhr" is not available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-wkzhr webserver-deployment-7f5969cbc7- deployment-6991 d6fddb68-cfe8-467f-a1db-d70cb7a479b5 37785 0 2023-07-29 17:01:35 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb737 0xc0053cb738}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-n4m5q,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n4m5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},Resou
rceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.245: INFO: Pod "webserver-deployment-7f5969cbc7-xkblf" is available: -&Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-xkblf webserver-deployment-7f5969cbc7- deployment-6991 98e3a4dd-6985-499d-b9f5-2eb3fefeb0c5 37617 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb8a0 0xc0053cb8a1}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:30 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.2\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-6vjjs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vjjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.2,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://3a5a157049e478afa56465acb617081043ee510035a008ea65594807accd731a,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.2,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.262: INFO: Pod "webserver-deployment-d9f79cb5-7lzsk" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-7lzsk webserver-deployment-d9f79cb5- deployment-6991 32be55c0-4039-4d3c-bf6d-f48209a39cc8 37765 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0053cba6f 0xc0053cba80}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-249vr,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-249vr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:,StartTime:2023-07-29 17:01:34 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.266: INFO: Pod "webserver-deployment-d9f79cb5-8k6ts" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-8k6ts webserver-deployment-d9f79cb5- deployment-6991 811fc93f-7cd1-4d60-9e5d-a5f9c6bc0df7 37699 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0053cbc67 0xc0053cbc68}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-d5786,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d5786,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.266: INFO: Pod "webserver-deployment-d9f79cb5-8lg2m" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-8lg2m webserver-deployment-d9f79cb5- deployment-6991 8629bf15-4be1-49d2-b099-0ce3fb23f356 37687 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0053cbe57 0xc0053cbe58}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-rcrww,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rcrww,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.267: INFO: Pod "webserver-deployment-d9f79cb5-c6wgn" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-c6wgn webserver-deployment-d9f79cb5- deployment-6991 3ae857ae-b2d7-4553-9b6a-b20d12038a32 37780 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2047 0xc0054a2048}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-zwrcx,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zwrcx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},
Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.267: INFO: Pod "webserver-deployment-d9f79cb5-kqgmf" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-kqgmf webserver-deployment-d9f79cb5- deployment-6991 80002d83-b0cf-42f3-aed2-bafb200d8668 37710 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a21af 0xc0054a21c0}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-9t6pb,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9t6pb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.268: INFO: Pod "webserver-deployment-d9f79cb5-mtfmg" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-mtfmg webserver-deployment-d9f79cb5- deployment-6991 68f69621-a630-49c0-9c4d-80c3079afa0a 37694 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a23a7 0xc0054a23a8}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-67b62,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-67b62,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.268: INFO: Pod "webserver-deployment-d9f79cb5-n8l94" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-n8l94 webserver-deployment-d9f79cb5- deployment-6991 95a8cc85-d000-44fe-b708-c9bbd126dbad 37782 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2597 0xc0054a2598}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-jxnsz,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jxnsz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},
Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.269: INFO: Pod "webserver-deployment-d9f79cb5-p9qkz" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-p9qkz webserver-deployment-d9f79cb5- deployment-6991 160f07e6-0329-42d4-90fe-bd4f17ac45f1 37777 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a26ff 0xc0054a2710}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-wwrpj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wwrpj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,Stdin
Once:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.281: INFO: Pod "webserver-deployment-d9f79cb5-pn9sx" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-pn9sx webserver-deployment-d9f79cb5- deployment-6991 90eb881d-76af-4056-b1a5-3b8d37701d60 37778 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2857 0xc0054a2858}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-lkzx7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lkzx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 17:01:34 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.281: INFO: Pod "webserver-deployment-d9f79cb5-r9wxt" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-r9wxt webserver-deployment-d9f79cb5- deployment-6991 aadeaa06-a663-4b3a-9429-51d8b58828a2 37678 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2a47 0xc0054a2a48}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-dvscx,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dvscx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.282: INFO: Pod "webserver-deployment-d9f79cb5-wl9nl" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-wl9nl webserver-deployment-d9f79cb5- deployment-6991 c8ed2c9e-2485-4a49-87f8-652c7c77c042 37784 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2c37 0xc0054a2c38}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-r7dwh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r7dwh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:,StartTime:2023-07-29 17:01:35 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:01:35.282: INFO: Pod "webserver-deployment-d9f79cb5-wrvt9" is not available: -&Pod{ObjectMeta:{webserver-deployment-d9f79cb5-wrvt9 webserver-deployment-d9f79cb5- deployment-6991 00fb6a98-b3da-4124-82bf-28cabe3f53c4 37775 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2e27 0xc0054a2e28}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-76mpx,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-76mpx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:,StartTime:2023-07-29 17:01:35 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +[BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 +[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:152 +STEP: Creating pod busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577 in namespace container-probe-9005 08/24/23 13:07:42.277 +Aug 24 13:07:42.295: INFO: Waiting up to 5m0s for pod "busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577" in namespace "container-probe-9005" to be "not pending" +Aug 24 13:07:42.301: INFO: Pod "busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577": Phase="Pending", Reason="", readiness=false. Elapsed: 5.711805ms +Aug 24 13:07:44.312: INFO: Pod "busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577": Phase="Running", Reason="", readiness=true. Elapsed: 2.01722811s +Aug 24 13:07:44.312: INFO: Pod "busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577" satisfied condition "not pending" +Aug 24 13:07:44.313: INFO: Started pod busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577 in namespace container-probe-9005 +STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 13:07:44.313 +Aug 24 13:07:44.320: INFO: Initial restart count of pod busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577 is 0 +STEP: deleting the pod 08/24/23 13:11:45.538 +[AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 -Jul 29 17:01:35.289: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Deployment +Aug 24 13:11:45.578: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 -STEP: Destroying namespace "deployment-6991" for this suite. 07/29/23 17:01:35.348 +STEP: Destroying namespace "container-probe-9005" for this suite. 
08/24/23 13:11:45.613 ------------------------------ -• [SLOW TEST] [6.824 seconds] -[sig-apps] Deployment -test/e2e/apps/framework.go:23 - deployment should support proportional scaling [Conformance] - test/e2e/apps/deployment.go:160 +• [SLOW TEST] [243.390 seconds] +[sig-node] Probing container +test/e2e/common/node/framework.go:23 + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:152 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Deployment + [BeforeEach] [sig-node] Probing container set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:01:28.566 - Jul 29 17:01:28.566: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename deployment 07/29/23 17:01:28.569 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:28.598 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:28.602 - [BeforeEach] [sig-apps] Deployment + STEP: Creating a kubernetes client 08/24/23 13:07:42.239 + Aug 24 13:07:42.240: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-probe 08/24/23 13:07:42.241 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:07:42.266 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:07:42.273 + [BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 - [It] deployment should support proportional scaling [Conformance] - test/e2e/apps/deployment.go:160 - Jul 29 17:01:28.606: INFO: Creating deployment "webserver-deployment" - Jul 29 17:01:28.618: INFO: Waiting for observed generation 1 - Jul 29 17:01:30.644: INFO: Waiting for all required pods to come up - Jul 29 17:01:30.653: INFO: Pod name httpd: Found 10 pods out of 10 - STEP: ensuring each pod is running 07/29/23 17:01:30.653 - Jul 29 17:01:30.653: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-c2z2l" in namespace "deployment-6991" to be "running" - Jul 29 17:01:30.654: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-qpwjp" in namespace "deployment-6991" to be "running" - Jul 29 17:01:30.654: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-pkv4c" in namespace "deployment-6991" to be "running" - Jul 29 17:01:30.654: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-r5nm4" in namespace "deployment-6991" to be "running" - Jul 29 17:01:30.655: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-t557d" in namespace "deployment-6991" to be "running" - Jul 29 17:01:30.655: INFO: Waiting up to 5m0s for pod "webserver-deployment-7f5969cbc7-266xr" in namespace "deployment-6991" to be "running" - Jul 29 17:01:30.665: INFO: Pod "webserver-deployment-7f5969cbc7-t557d": Phase="Pending", Reason="", readiness=false. Elapsed: 10.095554ms - Jul 29 17:01:30.665: INFO: Pod "webserver-deployment-7f5969cbc7-qpwjp": Phase="Pending", Reason="", readiness=false. Elapsed: 11.178758ms - Jul 29 17:01:30.669: INFO: Pod "webserver-deployment-7f5969cbc7-c2z2l": Phase="Pending", Reason="", readiness=false. Elapsed: 15.425976ms - Jul 29 17:01:30.669: INFO: Pod "webserver-deployment-7f5969cbc7-pkv4c": Phase="Pending", Reason="", readiness=false. 
Elapsed: 14.973042ms - Jul 29 17:01:30.672: INFO: Pod "webserver-deployment-7f5969cbc7-266xr": Phase="Pending", Reason="", readiness=false. Elapsed: 16.209229ms - Jul 29 17:01:30.672: INFO: Pod "webserver-deployment-7f5969cbc7-r5nm4": Phase="Pending", Reason="", readiness=false. Elapsed: 17.198987ms - Jul 29 17:01:32.674: INFO: Pod "webserver-deployment-7f5969cbc7-t557d": Phase="Running", Reason="", readiness=true. Elapsed: 2.01943308s - Jul 29 17:01:32.674: INFO: Pod "webserver-deployment-7f5969cbc7-t557d" satisfied condition "running" - Jul 29 17:01:32.676: INFO: Pod "webserver-deployment-7f5969cbc7-qpwjp": Phase="Running", Reason="", readiness=true. Elapsed: 2.022441734s - Jul 29 17:01:32.676: INFO: Pod "webserver-deployment-7f5969cbc7-qpwjp" satisfied condition "running" - Jul 29 17:01:32.677: INFO: Pod "webserver-deployment-7f5969cbc7-c2z2l": Phase="Running", Reason="", readiness=true. Elapsed: 2.023566333s - Jul 29 17:01:32.677: INFO: Pod "webserver-deployment-7f5969cbc7-c2z2l" satisfied condition "running" - Jul 29 17:01:32.679: INFO: Pod "webserver-deployment-7f5969cbc7-pkv4c": Phase="Running", Reason="", readiness=true. Elapsed: 2.024869259s - Jul 29 17:01:32.679: INFO: Pod "webserver-deployment-7f5969cbc7-pkv4c" satisfied condition "running" - Jul 29 17:01:32.679: INFO: Pod "webserver-deployment-7f5969cbc7-266xr": Phase="Running", Reason="", readiness=true. Elapsed: 2.02391102s - Jul 29 17:01:32.679: INFO: Pod "webserver-deployment-7f5969cbc7-266xr" satisfied condition "running" - Jul 29 17:01:32.680: INFO: Pod "webserver-deployment-7f5969cbc7-r5nm4": Phase="Running", Reason="", readiness=true. Elapsed: 2.025363809s - Jul 29 17:01:32.680: INFO: Pod "webserver-deployment-7f5969cbc7-r5nm4" satisfied condition "running" - Jul 29 17:01:32.680: INFO: Waiting for deployment "webserver-deployment" to complete - Jul 29 17:01:32.694: INFO: Updating deployment "webserver-deployment" with a non-existent image - Jul 29 17:01:32.709: INFO: Updating deployment webserver-deployment - Jul 29 17:01:32.709: INFO: Waiting for observed generation 2 - Jul 29 17:01:34.730: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8 - Jul 29 17:01:34.736: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8 - Jul 29 17:01:34.741: INFO: Waiting for the first rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas - Jul 29 17:01:34.757: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0 - Jul 29 17:01:34.757: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5 - Jul 29 17:01:34.762: INFO: Waiting for the second rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas - Jul 29 17:01:34.772: INFO: Verifying that deployment "webserver-deployment" has minimum required number of available replicas - Jul 29 17:01:34.772: INFO: Scaling up the deployment "webserver-deployment" from 10 to 30 - Jul 29 17:01:34.792: INFO: Updating deployment webserver-deployment - Jul 29 17:01:34.792: INFO: Waiting for the replicasets of deployment "webserver-deployment" to have desired number of replicas - Jul 29 17:01:34.823: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20 - Jul 29 17:01:34.831: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13 - [AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 - Jul 29 17:01:34.854: INFO: Deployment "webserver-deployment": - 
&Deployment{ObjectMeta:{webserver-deployment deployment-6991 78e2907f-c417-45b3-9b9f-b0ddec758c70 37756 3 2023-07-29 17:01:28 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*30,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc005294be8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:13,UpdatedReplicas:5,AvailableReplicas:8,UnavailableReplicas:5,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "webserver-deployment-d9f79cb5" is progressing.,LastUpdateTime:2023-07-29 17:01:32 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,},DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2023-07-29 17:01:34 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,},},ReadyReplicas:8,CollisionCount:nil,},} - - Jul 29 17:01:35.029: INFO: New ReplicaSet "webserver-deployment-d9f79cb5" of Deployment "webserver-deployment": - &ReplicaSet{ObjectMeta:{webserver-deployment-d9f79cb5 deployment-6991 b244bdb0-e80e-4e4d-933c-83826790a971 37752 3 
2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment webserver-deployment 78e2907f-c417-45b3-9b9f-b0ddec758c70 0xc0052950a7 0xc0052950a8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78e2907f-c417-45b3-9b9f-b0ddec758c70\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*13,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: d9f79cb5,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc005295148 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:5,FullyLabeledReplicas:5,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - Jul 29 17:01:35.029: INFO: All old ReplicaSets of Deployment "webserver-deployment": - Jul 29 17:01:35.029: INFO: &ReplicaSet{ObjectMeta:{webserver-deployment-7f5969cbc7 deployment-6991 92454c3c-aa77-4c87-a41a-9e0a84882af1 37749 3 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment webserver-deployment 78e2907f-c417-45b3-9b9f-b0ddec758c70 0xc005294fb7 0xc005294fb8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"78e2907f-c417-45b3-9b9f-b0ddec758c70\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*20,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7f5969cbc7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc005295048 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:8,FullyLabeledReplicas:8,ObservedGeneration:2,ReadyReplicas:8,AvailableReplicas:8,Conditions:[]ReplicaSetCondition{},},} - Jul 29 17:01:35.231: INFO: Pod "webserver-deployment-7f5969cbc7-266xr" is available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-266xr webserver-deployment-7f5969cbc7- deployment-6991 143b711f-d68c-4f33-991f-59304dc5bf2d 37653 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca137 0xc0053ca138}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:31 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.68\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-g8f7r,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g8f7r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Ke
y:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:10.233.64.68,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:31 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://2f26f5299ed47e514d02512bdfb283e3e217e50aa4407bc772ce11b484bf03b2,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.68,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.235: INFO: Pod "webserver-deployment-7f5969cbc7-68ft9" is available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-68ft9 webserver-deployment-7f5969cbc7- deployment-6991 36406430-5b58-45b8-b43d-f20c969d572d 37620 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca327 0xc0053ca328}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:30 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.26\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ddnjd,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ddnjd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Ke
y:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:10.233.65.26,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://5dfc8e659dc88527871d83f4cfe63f0888afb19b6d0fc1b6f151080d613170de,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.26,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.237: INFO: Pod "webserver-deployment-7f5969cbc7-992qv" is not available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-992qv webserver-deployment-7f5969cbc7- deployment-6991 8db497e8-a466-4770-8f44-d63c90af8635 37787 0 2023-07-29 17:01:35 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca517 0xc0053ca518}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-j47wv,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j47wv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]Po
dResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.238: INFO: Pod "webserver-deployment-7f5969cbc7-9ddsm" is not available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-9ddsm webserver-deployment-7f5969cbc7- deployment-6991 cbf9831c-9405-4cc5-8b6d-f3944a79841e 37790 0 2023-07-29 17:01:35 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca657 0xc0053ca658}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-6kv2t,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6kv2t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:
[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.239: INFO: Pod "webserver-deployment-7f5969cbc7-bf5jk" is not available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-bf5jk webserver-deployment-7f5969cbc7- deployment-6991 adad2a36-fe17-4129-8f2e-d8a6451686fe 37770 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca7c0 0xc0053ca7c1}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-8knwf,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8knwf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},Resou
rceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.240: INFO: Pod "webserver-deployment-7f5969cbc7-jcj6q" is not available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-jcj6q webserver-deployment-7f5969cbc7- deployment-6991 a6e0b453-0fee-421e-95cd-258b2bc7cbe1 37786 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053ca920 0xc0053ca921}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-d4hhn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d4hhn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGro
up:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.240: INFO: Pod "webserver-deployment-7f5969cbc7-jnxqb" is available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-jnxqb webserver-deployment-7f5969cbc7- deployment-6991 424e7684-a0f3-4969-ac3e-89328a3c534b 37613 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053caa80 0xc0053caa81}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:30 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.23\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ddtm9,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ddtm9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Ke
y:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:10.233.65.23,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://4c69f820ec6f8f8bff659744422d0c24bb0cf8e8f0b4b56b8f0a22c782bfefd8,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.23,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.241: INFO: Pod "webserver-deployment-7f5969cbc7-kdn7v" is available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-kdn7v webserver-deployment-7f5969cbc7- deployment-6991 fabff63a-b810-49c4-a1c7-e9a349eea03b 37623 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cac67 0xc0053cac68}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:30 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.40\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-zdgmk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zdgmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Ke
y:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.40,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://6f7654c134c78f14368ef115d053ea0f57684c0ce30a7c566183dd9812a5ae76,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.40,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.242: INFO: Pod "webserver-deployment-7f5969cbc7-pkv4c" is available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-pkv4c webserver-deployment-7f5969cbc7- deployment-6991 e73bc416-ab4b-4f1c-9436-ba9d6dbed13e 37643 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cae57 0xc0053cae58}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:31 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.65.195\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-2znq9,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2znq9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{K
ey:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:10.233.65.195,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://31171be835515d0c4a1cf6f1c9aac9a76592bc9cd35559ed084aa6b746782c41,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.65.195,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.242: INFO: Pod "webserver-deployment-7f5969cbc7-pqrb7" is not available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-pqrb7 webserver-deployment-7f5969cbc7- deployment-6991 5fc5d9f6-3cb6-4f02-8b92-a329bb5235ea 37788 0 2023-07-29 17:01:35 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb047 0xc0053cb048}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-vwwgj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vwwgj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]Po
dResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.243: INFO: Pod "webserver-deployment-7f5969cbc7-r5nm4" is available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-r5nm4 webserver-deployment-7f5969cbc7- deployment-6991 8e49b83f-a21e-4b35-8356-9647d1975390 37658 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb187 0xc0053cb188}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:31 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.36\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-gk2lm,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gk2lm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:10.233.64.36,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:31 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://d58f8715456a183221ffccdcc9050450c27eccd9a0ac6330ec67588adbdda037,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.36,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.243: INFO: Pod "webserver-deployment-7f5969cbc7-sgkbw" is not available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-sgkbw webserver-deployment-7f5969cbc7- deployment-6991 3ed7ca7b-84cb-4706-abf0-ce6b74e13e56 37758 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb377 0xc0053cb378}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-9hbjs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9hbjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 17:01:34 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.244: INFO: Pod "webserver-deployment-7f5969cbc7-t557d" is available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-t557d webserver-deployment-7f5969cbc7- deployment-6991 d841169d-c384-49ea-a8c4-47f2996bf88c 37655 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb547 0xc0053cb548}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:31 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.64.233\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-g4zm4,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g4zm4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:31 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:10.233.64.233,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:31 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://4ec031326fa4cfe239e55294a777ace514050b1fe62007ab424b8e715971e732,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.64.233,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.244: INFO: Pod "webserver-deployment-7f5969cbc7-wkzhr" is not available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-wkzhr webserver-deployment-7f5969cbc7- deployment-6991 d6fddb68-cfe8-467f-a1db-d70cb7a479b5 37785 0 2023-07-29 17:01:35 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb737 0xc0053cb738}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-n4m5q,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n4m5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},Resou
rceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.245: INFO: Pod "webserver-deployment-7f5969cbc7-xkblf" is available: - &Pod{ObjectMeta:{webserver-deployment-7f5969cbc7-xkblf webserver-deployment-7f5969cbc7- deployment-6991 98e3a4dd-6985-499d-b9f5-2eb3fefeb0c5 37617 0 2023-07-29 17:01:28 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet webserver-deployment-7f5969cbc7 92454c3c-aa77-4c87-a41a-9e0a84882af1 0xc0053cb8a0 0xc0053cb8a1}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"92454c3c-aa77-4c87-a41a-9e0a84882af1\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:30 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.2\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-6vjjs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vjjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.2,StartTime:2023-07-29 17:01:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:01:30 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://3a5a157049e478afa56465acb617081043ee510035a008ea65594807accd731a,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.2,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.262: INFO: Pod "webserver-deployment-d9f79cb5-7lzsk" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-7lzsk webserver-deployment-d9f79cb5- deployment-6991 32be55c0-4039-4d3c-bf6d-f48209a39cc8 37765 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0053cba6f 0xc0053cba80}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-249vr,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-249vr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:,StartTime:2023-07-29 17:01:34 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.266: INFO: Pod "webserver-deployment-d9f79cb5-8k6ts" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-8k6ts webserver-deployment-d9f79cb5- deployment-6991 811fc93f-7cd1-4d60-9e5d-a5f9c6bc0df7 37699 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0053cbc67 0xc0053cbc68}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-d5786,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d5786,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.266: INFO: Pod "webserver-deployment-d9f79cb5-8lg2m" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-8lg2m webserver-deployment-d9f79cb5- deployment-6991 8629bf15-4be1-49d2-b099-0ce3fb23f356 37687 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0053cbe57 0xc0053cbe58}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-rcrww,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rcrww,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.267: INFO: Pod "webserver-deployment-d9f79cb5-c6wgn" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-c6wgn webserver-deployment-d9f79cb5- deployment-6991 3ae857ae-b2d7-4553-9b6a-b20d12038a32 37780 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2047 0xc0054a2048}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-zwrcx,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zwrcx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},
Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.267: INFO: Pod "webserver-deployment-d9f79cb5-kqgmf" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-kqgmf webserver-deployment-d9f79cb5- deployment-6991 80002d83-b0cf-42f3-aed2-bafb200d8668 37710 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a21af 0xc0054a21c0}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-9t6pb,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9t6pb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.268: INFO: Pod "webserver-deployment-d9f79cb5-mtfmg" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-mtfmg webserver-deployment-d9f79cb5- deployment-6991 68f69621-a630-49c0-9c4d-80c3079afa0a 37694 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a23a7 0xc0054a23a8}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-67b62,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-67b62,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.268: INFO: Pod "webserver-deployment-d9f79cb5-n8l94" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-n8l94 webserver-deployment-d9f79cb5- deployment-6991 95a8cc85-d000-44fe-b708-c9bbd126dbad 37782 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2597 0xc0054a2598}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-jxnsz,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jxnsz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},
Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.269: INFO: Pod "webserver-deployment-d9f79cb5-p9qkz" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-p9qkz webserver-deployment-d9f79cb5- deployment-6991 160f07e6-0329-42d4-90fe-bd4f17ac45f1 37777 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a26ff 0xc0054a2710}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-wwrpj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wwrpj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,Std
inOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.281: INFO: Pod "webserver-deployment-d9f79cb5-pn9sx" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-pn9sx webserver-deployment-d9f79cb5- deployment-6991 90eb881d-76af-4056-b1a5-3b8d37701d60 37778 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2857 0xc0054a2858}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-lkzx7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lkzx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 17:01:34 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.281: INFO: Pod "webserver-deployment-d9f79cb5-r9wxt" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-r9wxt webserver-deployment-d9f79cb5- deployment-6991 aadeaa06-a663-4b3a-9429-51d8b58828a2 37678 0 2023-07-29 17:01:32 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2a47 0xc0054a2a48}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:32 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-dvscx,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dvscx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:32 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:,StartTime:2023-07-29 17:01:32 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.282: INFO: Pod "webserver-deployment-d9f79cb5-wl9nl" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-wl9nl webserver-deployment-d9f79cb5- deployment-6991 c8ed2c9e-2485-4a49-87f8-652c7c77c042 37784 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2c37 0xc0054a2c38}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-r7dwh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r7dwh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.120,PodIP:,StartTime:2023-07-29 17:01:35 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:01:35.282: INFO: Pod "webserver-deployment-d9f79cb5-wrvt9" is not available: - &Pod{ObjectMeta:{webserver-deployment-d9f79cb5-wrvt9 webserver-deployment-d9f79cb5- deployment-6991 00fb6a98-b3da-4124-82bf-28cabe3f53c4 37775 0 2023-07-29 17:01:34 +0000 UTC map[name:httpd pod-template-hash:d9f79cb5] map[] [{apps/v1 ReplicaSet webserver-deployment-d9f79cb5 b244bdb0-e80e-4e4d-933c-83826790a971 0xc0054a2e27 0xc0054a2e28}] [] [{kube-controller-manager Update v1 2023-07-29 17:01:34 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"b244bdb0-e80e-4e4d-933c-83826790a971\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:01:35 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-76mpx,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-76mpx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClai
m{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:35 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:01:34 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.211,PodIP:,StartTime:2023-07-29 17:01:35 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - [AfterEach] [sig-apps] Deployment + [BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 + [It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:152 + STEP: Creating pod busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577 in namespace container-probe-9005 08/24/23 13:07:42.277 + Aug 24 13:07:42.295: INFO: Waiting up to 5m0s for pod "busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577" in namespace "container-probe-9005" to be "not pending" + Aug 24 13:07:42.301: INFO: Pod "busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577": Phase="Pending", Reason="", readiness=false. Elapsed: 5.711805ms + Aug 24 13:07:44.312: INFO: Pod "busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577": Phase="Running", Reason="", readiness=true. Elapsed: 2.01722811s + Aug 24 13:07:44.312: INFO: Pod "busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577" satisfied condition "not pending" + Aug 24 13:07:44.313: INFO: Started pod busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577 in namespace container-probe-9005 + STEP: checking the pod's current state and verifying that restartCount is present 08/24/23 13:07:44.313 + Aug 24 13:07:44.320: INFO: Initial restart count of pod busybox-c30b9bd1-e4f3-4960-ade4-1e6f5eb2d577 is 0 + STEP: deleting the pod 08/24/23 13:11:45.538 + [AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 - Jul 29 17:01:35.289: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Deployment + Aug 24 13:11:45.578: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 - STEP: Destroying namespace "deployment-6991" for this suite. 07/29/23 17:01:35.348 + STEP: Destroying namespace "container-probe-9005" for this suite. 
08/24/23 13:11:45.613 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSS +SS ------------------------------ -[sig-storage] Projected configMap - should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:47 -[BeforeEach] [sig-storage] Projected configMap +[sig-node] Container Runtime blackbox test on terminated container + should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:248 +[BeforeEach] [sig-node] Container Runtime set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:01:35.404 -Jul 29 17:01:35.404: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 17:01:35.406 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:35.678 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:35.738 -[BeforeEach] [sig-storage] Projected configMap - test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:47 -STEP: Creating configMap with name projected-configmap-test-volume-f32691f7-16a3-47a3-8a59-d44bfb84e9c8 07/29/23 17:01:35.758 -STEP: Creating a pod to test consume configMaps 07/29/23 17:01:35.838 -Jul 29 17:01:36.026: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429" in namespace "projected-2863" to be "Succeeded or Failed" -Jul 29 17:01:36.126: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429": Phase="Pending", Reason="", readiness=false. Elapsed: 100.374143ms -Jul 29 17:01:38.132: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429": Phase="Pending", Reason="", readiness=false. Elapsed: 2.106743338s -Jul 29 17:01:40.135: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429": Phase="Pending", Reason="", readiness=false. Elapsed: 4.109307382s -Jul 29 17:01:42.162: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.136709864s -STEP: Saw pod success 07/29/23 17:01:42.162 -Jul 29 17:01:42.163: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429" satisfied condition "Succeeded or Failed" -Jul 29 17:01:42.169: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429 container agnhost-container: -STEP: delete the pod 07/29/23 17:01:42.197 -Jul 29 17:01:42.247: INFO: Waiting for pod pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429 to disappear -Jul 29 17:01:42.256: INFO: Pod pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429 no longer exists -[AfterEach] [sig-storage] Projected configMap +STEP: Creating a kubernetes client 08/24/23 13:11:45.634 +Aug 24 13:11:45.634: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-runtime 08/24/23 13:11:45.638 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:45.67 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:45.676 +[BeforeEach] [sig-node] Container Runtime + test/e2e/framework/metrics/init/init.go:31 +[It] should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:248 +STEP: create the container 08/24/23 13:11:45.683 +STEP: wait for the container to reach Succeeded 08/24/23 13:11:45.701 +STEP: get the container status 08/24/23 13:11:49.756 +STEP: the container should be terminated 08/24/23 13:11:49.765 +STEP: the termination message should be set 08/24/23 13:11:49.765 +Aug 24 13:11:49.765: INFO: Expected: &{OK} to match Container's Termination Message: OK -- +STEP: delete the container 08/24/23 13:11:49.766 +[AfterEach] [sig-node] Container Runtime test/e2e/framework/node/init/init.go:32 -Jul 29 17:01:42.259: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected configMap +Aug 24 13:11:49.801: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-node] Container Runtime dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected configMap +[DeferCleanup (Each)] [sig-node] Container Runtime tear down framework | framework.go:193 -STEP: Destroying namespace "projected-2863" for this suite. 07/29/23 17:01:42.268 +STEP: Destroying namespace "container-runtime-1237" for this suite. 
08/24/23 13:11:49.81 ------------------------------ -• [SLOW TEST] [6.894 seconds] -[sig-storage] Projected configMap -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:47 +• [4.192 seconds] +[sig-node] Container Runtime +test/e2e/common/node/framework.go:23 + blackbox test + test/e2e/common/node/runtime.go:44 + on terminated container + test/e2e/common/node/runtime.go:137 + should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:248 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected configMap + [BeforeEach] [sig-node] Container Runtime set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:01:35.404 - Jul 29 17:01:35.404: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 17:01:35.406 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:35.678 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:35.738 - [BeforeEach] [sig-storage] Projected configMap + STEP: Creating a kubernetes client 08/24/23 13:11:45.634 + Aug 24 13:11:45.634: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-runtime 08/24/23 13:11:45.638 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:45.67 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:45.676 + [BeforeEach] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_configmap.go:47 - STEP: Creating configMap with name projected-configmap-test-volume-f32691f7-16a3-47a3-8a59-d44bfb84e9c8 07/29/23 17:01:35.758 - STEP: Creating a pod to test consume configMaps 07/29/23 17:01:35.838 - Jul 29 17:01:36.026: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429" in namespace "projected-2863" to be "Succeeded or Failed" - Jul 29 17:01:36.126: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429": Phase="Pending", Reason="", readiness=false. Elapsed: 100.374143ms - Jul 29 17:01:38.132: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429": Phase="Pending", Reason="", readiness=false. Elapsed: 2.106743338s - Jul 29 17:01:40.135: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429": Phase="Pending", Reason="", readiness=false. Elapsed: 4.109307382s - Jul 29 17:01:42.162: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.136709864s - STEP: Saw pod success 07/29/23 17:01:42.162 - Jul 29 17:01:42.163: INFO: Pod "pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429" satisfied condition "Succeeded or Failed" - Jul 29 17:01:42.169: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429 container agnhost-container: - STEP: delete the pod 07/29/23 17:01:42.197 - Jul 29 17:01:42.247: INFO: Waiting for pod pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429 to disappear - Jul 29 17:01:42.256: INFO: Pod pod-projected-configmaps-54473ee0-17e3-48bd-b622-80c648fe4429 no longer exists - [AfterEach] [sig-storage] Projected configMap + [It] should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:248 + STEP: create the container 08/24/23 13:11:45.683 + STEP: wait for the container to reach Succeeded 08/24/23 13:11:45.701 + STEP: get the container status 08/24/23 13:11:49.756 + STEP: the container should be terminated 08/24/23 13:11:49.765 + STEP: the termination message should be set 08/24/23 13:11:49.765 + Aug 24 13:11:49.765: INFO: Expected: &{OK} to match Container's Termination Message: OK -- + STEP: delete the container 08/24/23 13:11:49.766 + [AfterEach] [sig-node] Container Runtime test/e2e/framework/node/init/init.go:32 - Jul 29 17:01:42.259: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected configMap + Aug 24 13:11:49.801: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-node] Container Runtime dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected configMap + [DeferCleanup (Each)] [sig-node] Container Runtime tear down framework | framework.go:193 - STEP: Destroying namespace "projected-2863" for this suite. 07/29/23 17:01:42.268 + STEP: Destroying namespace "container-runtime-1237" for this suite. 
08/24/23 13:11:49.81 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] ConfigMap - should be consumable from pods in volume as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:74 -[BeforeEach] [sig-storage] ConfigMap +[sig-node] RuntimeClass + should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:156 +[BeforeEach] [sig-node] RuntimeClass set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:01:42.305 -Jul 29 17:01:42.306: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename configmap 07/29/23 17:01:42.316 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:42.396 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:42.4 -[BeforeEach] [sig-storage] ConfigMap +STEP: Creating a kubernetes client 08/24/23 13:11:49.84 +Aug 24 13:11:49.840: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename runtimeclass 08/24/23 13:11:49.844 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:49.873 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:49.882 +[BeforeEach] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:74 -STEP: Creating configMap with name configmap-test-volume-4a27434b-cd64-4950-a48d-3fbb0e23803f 07/29/23 17:01:42.409 -STEP: Creating a pod to test consume configMaps 07/29/23 17:01:42.425 -Jul 29 17:01:42.452: INFO: Waiting up to 5m0s for pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887" in namespace "configmap-9606" to be "Succeeded or Failed" -Jul 29 17:01:42.467: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Pending", Reason="", readiness=false. Elapsed: 14.477123ms -Jul 29 17:01:44.480: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027930139s -Jul 29 17:01:46.474: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Pending", Reason="", readiness=false. Elapsed: 4.021737317s -Jul 29 17:01:48.480: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Pending", Reason="", readiness=false. Elapsed: 6.028027486s -Jul 29 17:01:50.476: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 8.02379574s -STEP: Saw pod success 07/29/23 17:01:50.476 -Jul 29 17:01:50.476: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887" satisfied condition "Succeeded or Failed" -Jul 29 17:01:50.482: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887 container agnhost-container: -STEP: delete the pod 07/29/23 17:01:50.499 -Jul 29 17:01:50.526: INFO: Waiting for pod pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887 to disappear -Jul 29 17:01:50.530: INFO: Pod pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887 no longer exists -[AfterEach] [sig-storage] ConfigMap +[It] should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:156 +STEP: Deleting RuntimeClass runtimeclass-4432-delete-me 08/24/23 13:11:49.899 +STEP: Waiting for the RuntimeClass to disappear 08/24/23 13:11:49.915 +[AfterEach] [sig-node] RuntimeClass test/e2e/framework/node/init/init.go:32 -Jul 29 17:01:50.531: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] ConfigMap +Aug 24 13:11:49.944: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] RuntimeClass dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] ConfigMap +[DeferCleanup (Each)] [sig-node] RuntimeClass tear down framework | framework.go:193 -STEP: Destroying namespace "configmap-9606" for this suite. 07/29/23 17:01:50.537 +STEP: Destroying namespace "runtimeclass-4432" for this suite. 08/24/23 13:11:49.959 ------------------------------ -• [SLOW TEST] [8.248 seconds] -[sig-storage] ConfigMap -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:74 +• [0.138 seconds] +[sig-node] RuntimeClass +test/e2e/common/node/framework.go:23 + should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:156 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] ConfigMap + [BeforeEach] [sig-node] RuntimeClass set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:01:42.305 - Jul 29 17:01:42.306: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename configmap 07/29/23 17:01:42.316 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:42.396 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:42.4 - [BeforeEach] [sig-storage] ConfigMap + STEP: Creating a kubernetes client 08/24/23 13:11:49.84 + Aug 24 13:11:49.840: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename runtimeclass 08/24/23 13:11:49.844 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:49.873 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:49.882 + [BeforeEach] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] - test/e2e/common/storage/configmap_volume.go:74 - STEP: Creating configMap with name configmap-test-volume-4a27434b-cd64-4950-a48d-3fbb0e23803f 07/29/23 17:01:42.409 - STEP: 
Creating a pod to test consume configMaps 07/29/23 17:01:42.425 - Jul 29 17:01:42.452: INFO: Waiting up to 5m0s for pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887" in namespace "configmap-9606" to be "Succeeded or Failed" - Jul 29 17:01:42.467: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Pending", Reason="", readiness=false. Elapsed: 14.477123ms - Jul 29 17:01:44.480: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027930139s - Jul 29 17:01:46.474: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Pending", Reason="", readiness=false. Elapsed: 4.021737317s - Jul 29 17:01:48.480: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Pending", Reason="", readiness=false. Elapsed: 6.028027486s - Jul 29 17:01:50.476: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887": Phase="Succeeded", Reason="", readiness=false. Elapsed: 8.02379574s - STEP: Saw pod success 07/29/23 17:01:50.476 - Jul 29 17:01:50.476: INFO: Pod "pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887" satisfied condition "Succeeded or Failed" - Jul 29 17:01:50.482: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887 container agnhost-container: - STEP: delete the pod 07/29/23 17:01:50.499 - Jul 29 17:01:50.526: INFO: Waiting for pod pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887 to disappear - Jul 29 17:01:50.530: INFO: Pod pod-configmaps-0e2f95b3-cfd6-45aa-80cf-0a2d801a3887 no longer exists - [AfterEach] [sig-storage] ConfigMap + [It] should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] + test/e2e/common/node/runtimeclass.go:156 + STEP: Deleting RuntimeClass runtimeclass-4432-delete-me 08/24/23 13:11:49.899 + STEP: Waiting for the RuntimeClass to disappear 08/24/23 13:11:49.915 + [AfterEach] [sig-node] RuntimeClass test/e2e/framework/node/init/init.go:32 - Jul 29 17:01:50.531: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] ConfigMap + Aug 24 13:11:49.944: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] RuntimeClass test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] RuntimeClass dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] ConfigMap + [DeferCleanup (Each)] [sig-node] RuntimeClass tear down framework | framework.go:193 - STEP: Destroying namespace "configmap-9606" for this suite. 07/29/23 17:01:50.537 + STEP: Destroying namespace "runtimeclass-4432" for this suite. 
08/24/23 13:11:49.959 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSS ------------------------------ -[sig-storage] Projected secret - should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:46 -[BeforeEach] [sig-storage] Projected secret +[sig-storage] Secrets + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:57 +[BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:01:50.554 -Jul 29 17:01:50.554: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 17:01:50.558 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:50.582 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:50.587 -[BeforeEach] [sig-storage] Projected secret +STEP: Creating a kubernetes client 08/24/23 13:11:49.981 +Aug 24 13:11:49.981: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 13:11:49.983 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:50.022 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:50.032 +[BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:46 -STEP: Creating projection with secret that has name projected-secret-test-95cb461e-aee9-44a5-b315-f67183d3b578 07/29/23 17:01:50.591 -STEP: Creating a pod to test consume secrets 07/29/23 17:01:50.6 -Jul 29 17:01:50.617: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321" in namespace "projected-8013" to be "Succeeded or Failed" -Jul 29 17:01:50.622: INFO: Pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321": Phase="Pending", Reason="", readiness=false. Elapsed: 4.809652ms -Jul 29 17:01:52.632: INFO: Pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015595298s -Jul 29 17:01:54.629: INFO: Pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.012689641s -STEP: Saw pod success 07/29/23 17:01:54.63 -Jul 29 17:01:54.630: INFO: Pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321" satisfied condition "Succeeded or Failed" -Jul 29 17:01:54.636: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321 container projected-secret-volume-test: -STEP: delete the pod 07/29/23 17:01:54.648 -Jul 29 17:01:54.676: INFO: Waiting for pod pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321 to disappear -Jul 29 17:01:54.681: INFO: Pod pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321 no longer exists -[AfterEach] [sig-storage] Projected secret +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:57 +STEP: Creating secret with name secret-test-ab02b6a7-160b-439f-a5df-6b4983f453ba 08/24/23 13:11:50.037 +STEP: Creating a pod to test consume secrets 08/24/23 13:11:50.047 +Aug 24 13:11:50.069: INFO: Waiting up to 5m0s for pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8" in namespace "secrets-5049" to be "Succeeded or Failed" +Aug 24 13:11:50.085: INFO: Pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8": Phase="Pending", Reason="", readiness=false. Elapsed: 15.576541ms +Aug 24 13:11:52.094: INFO: Pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024314532s +Aug 24 13:11:54.095: INFO: Pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.025313026s +STEP: Saw pod success 08/24/23 13:11:54.095 +Aug 24 13:11:54.095: INFO: Pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8" satisfied condition "Succeeded or Failed" +Aug 24 13:11:54.100: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8 container secret-volume-test: +STEP: delete the pod 08/24/23 13:11:54.138 +Aug 24 13:11:54.201: INFO: Waiting for pod pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8 to disappear +Aug 24 13:11:54.207: INFO: Pod pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8 no longer exists +[AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 17:01:54.682: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected secret +Aug 24 13:11:54.207: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "projected-8013" for this suite. 07/29/23 17:01:54.693 +STEP: Destroying namespace "secrets-5049" for this suite. 
08/24/23 13:11:54.217 ------------------------------ -• [4.151 seconds] -[sig-storage] Projected secret +• [4.252 seconds] +[sig-storage] Secrets test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:46 + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:57 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected secret + [BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:01:50.554 - Jul 29 17:01:50.554: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 17:01:50.558 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:50.582 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:50.587 - [BeforeEach] [sig-storage] Projected secret + STEP: Creating a kubernetes client 08/24/23 13:11:49.981 + Aug 24 13:11:49.981: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 13:11:49.983 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:50.022 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:50.032 + [BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:46 - STEP: Creating projection with secret that has name projected-secret-test-95cb461e-aee9-44a5-b315-f67183d3b578 07/29/23 17:01:50.591 - STEP: Creating a pod to test consume secrets 07/29/23 17:01:50.6 - Jul 29 17:01:50.617: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321" in namespace "projected-8013" to be "Succeeded or Failed" - Jul 29 17:01:50.622: INFO: Pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321": Phase="Pending", Reason="", readiness=false. Elapsed: 4.809652ms - Jul 29 17:01:52.632: INFO: Pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015595298s - Jul 29 17:01:54.629: INFO: Pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.012689641s - STEP: Saw pod success 07/29/23 17:01:54.63 - Jul 29 17:01:54.630: INFO: Pod "pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321" satisfied condition "Succeeded or Failed" - Jul 29 17:01:54.636: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321 container projected-secret-volume-test: - STEP: delete the pod 07/29/23 17:01:54.648 - Jul 29 17:01:54.676: INFO: Waiting for pod pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321 to disappear - Jul 29 17:01:54.681: INFO: Pod pod-projected-secrets-b447626a-3f5a-42d6-b934-8632887d7321 no longer exists - [AfterEach] [sig-storage] Projected secret + [It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:57 + STEP: Creating secret with name secret-test-ab02b6a7-160b-439f-a5df-6b4983f453ba 08/24/23 13:11:50.037 + STEP: Creating a pod to test consume secrets 08/24/23 13:11:50.047 + Aug 24 13:11:50.069: INFO: Waiting up to 5m0s for pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8" in namespace "secrets-5049" to be "Succeeded or Failed" + Aug 24 13:11:50.085: INFO: Pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8": Phase="Pending", Reason="", readiness=false. Elapsed: 15.576541ms + Aug 24 13:11:52.094: INFO: Pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024314532s + Aug 24 13:11:54.095: INFO: Pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.025313026s + STEP: Saw pod success 08/24/23 13:11:54.095 + Aug 24 13:11:54.095: INFO: Pod "pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8" satisfied condition "Succeeded or Failed" + Aug 24 13:11:54.100: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8 container secret-volume-test: + STEP: delete the pod 08/24/23 13:11:54.138 + Aug 24 13:11:54.201: INFO: Waiting for pod pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8 to disappear + Aug 24 13:11:54.207: INFO: Pod pod-secrets-703c4443-78df-4a66-9b53-9875de2ed2d8 no longer exists + [AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 17:01:54.682: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected secret + Aug 24 13:11:54.207: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected secret + [DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected secret + [DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "projected-8013" for this suite. 07/29/23 17:01:54.693 + STEP: Destroying namespace "secrets-5049" for this suite. 
08/24/23 13:11:54.217 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSS +SSSSSSSS ------------------------------ -[sig-network] HostPort - validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] - test/e2e/network/hostport.go:63 -[BeforeEach] [sig-network] HostPort +[sig-storage] ConfigMap + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:57 +[BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:01:54.709 -Jul 29 17:01:54.709: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename hostport 07/29/23 17:01:54.713 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:54.746 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:54.749 -[BeforeEach] [sig-network] HostPort +STEP: Creating a kubernetes client 08/24/23 13:11:54.236 +Aug 24 13:11:54.236: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 13:11:54.238 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:54.261 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:54.265 +[BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] HostPort - test/e2e/network/hostport.go:49 -[It] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] - test/e2e/network/hostport.go:63 -STEP: Trying to create a pod(pod1) with hostport 54323 and hostIP 127.0.0.1 and expect scheduled 07/29/23 17:01:54.768 -Jul 29 17:01:54.782: INFO: Waiting up to 5m0s for pod "pod1" in namespace "hostport-5764" to be "running and ready" -Jul 29 17:01:54.788: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.391293ms -Jul 29 17:01:54.788: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:01:56.796: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.013881131s -Jul 29 17:01:56.797: INFO: The phase of Pod pod1 is Running (Ready = true) -Jul 29 17:01:56.797: INFO: Pod "pod1" satisfied condition "running and ready" -STEP: Trying to create another pod(pod2) with hostport 54323 but hostIP 192.168.121.141 on the node which pod1 resides and expect scheduled 07/29/23 17:01:56.797 -Jul 29 17:01:56.810: INFO: Waiting up to 5m0s for pod "pod2" in namespace "hostport-5764" to be "running and ready" -Jul 29 17:01:56.820: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 8.919531ms -Jul 29 17:01:56.820: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:01:58.827: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.016357153s -Jul 29 17:01:58.827: INFO: The phase of Pod pod2 is Running (Ready = true) -Jul 29 17:01:58.828: INFO: Pod "pod2" satisfied condition "running and ready" -STEP: Trying to create a third pod(pod3) with hostport 54323, hostIP 192.168.121.141 but use UDP protocol on the node which pod2 resides 07/29/23 17:01:58.828 -Jul 29 17:01:58.836: INFO: Waiting up to 5m0s for pod "pod3" in namespace "hostport-5764" to be "running and ready" -Jul 29 17:01:58.847: INFO: Pod "pod3": Phase="Pending", Reason="", readiness=false. Elapsed: 10.035054ms -Jul 29 17:01:58.847: INFO: The phase of Pod pod3 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:02:00.855: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 2.018433648s -Jul 29 17:02:00.855: INFO: The phase of Pod pod3 is Running (Ready = false) -Jul 29 17:02:02.856: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 4.019787923s -Jul 29 17:02:02.857: INFO: The phase of Pod pod3 is Running (Ready = false) -Jul 29 17:02:04.855: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 6.018105761s -Jul 29 17:02:04.855: INFO: The phase of Pod pod3 is Running (Ready = false) -Jul 29 17:02:06.854: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 8.0178423s -Jul 29 17:02:06.855: INFO: The phase of Pod pod3 is Running (Ready = false) -Jul 29 17:02:08.860: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 10.022948496s -Jul 29 17:02:08.860: INFO: The phase of Pod pod3 is Running (Ready = false) -Jul 29 17:02:10.854: INFO: Pod "pod3": Phase="Running", Reason="", readiness=true. Elapsed: 12.017614887s -Jul 29 17:02:10.854: INFO: The phase of Pod pod3 is Running (Ready = true) -Jul 29 17:02:10.855: INFO: Pod "pod3" satisfied condition "running and ready" -Jul 29 17:02:10.862: INFO: Waiting up to 5m0s for pod "e2e-host-exec" in namespace "hostport-5764" to be "running and ready" -Jul 29 17:02:10.868: INFO: Pod "e2e-host-exec": Phase="Pending", Reason="", readiness=false. Elapsed: 5.9555ms -Jul 29 17:02:10.869: INFO: The phase of Pod e2e-host-exec is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:02:12.877: INFO: Pod "e2e-host-exec": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.014935444s -Jul 29 17:02:12.878: INFO: The phase of Pod e2e-host-exec is Running (Ready = true) -Jul 29 17:02:12.878: INFO: Pod "e2e-host-exec" satisfied condition "running and ready" -STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54323 07/29/23 17:02:12.884 -Jul 29 17:02:12.884: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 192.168.121.141 http://127.0.0.1:54323/hostname] Namespace:hostport-5764 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 17:02:12.884: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 17:02:12.886: INFO: ExecWithOptions: Clientset creation -Jul 29 17:02:12.886: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-5764/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+--connect-timeout+5+--interface+192.168.121.141+http%3A%2F%2F127.0.0.1%3A54323%2Fhostname&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) -STEP: checking connectivity from pod e2e-host-exec to serverIP: 192.168.121.141, port: 54323 07/29/23 17:02:13.048 -Jul 29 17:02:13.048: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://192.168.121.141:54323/hostname] Namespace:hostport-5764 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 17:02:13.048: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 17:02:13.052: INFO: ExecWithOptions: Clientset creation -Jul 29 17:02:13.052: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-5764/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+--connect-timeout+5+http%3A%2F%2F192.168.121.141%3A54323%2Fhostname&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) -STEP: checking connectivity from pod e2e-host-exec to serverIP: 192.168.121.141, port: 54323 UDP 07/29/23 17:02:13.171 -Jul 29 17:02:13.172: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostname | nc -u -w 5 192.168.121.141 54323] Namespace:hostport-5764 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} -Jul 29 17:02:13.172: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -Jul 29 17:02:13.173: INFO: ExecWithOptions: Clientset creation -Jul 29 17:02:13.173: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-5764/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostname+%7C+nc+-u+-w+5+192.168.121.141+54323&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) -[AfterEach] [sig-network] HostPort +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:57 +STEP: Creating configMap with name configmap-test-volume-2db25835-d6ed-40cc-99a7-78e262dfdd69 08/24/23 13:11:54.269 +STEP: Creating a pod to test consume configMaps 08/24/23 13:11:54.277 +Aug 24 13:11:54.297: INFO: Waiting up to 5m0s for pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df" in namespace "configmap-8659" to be "Succeeded or Failed" +Aug 24 13:11:54.305: INFO: Pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.295635ms +Aug 24 13:11:56.316: INFO: Pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019236898s +Aug 24 13:11:58.313: INFO: Pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01652131s +STEP: Saw pod success 08/24/23 13:11:58.313 +Aug 24 13:11:58.314: INFO: Pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df" satisfied condition "Succeeded or Failed" +Aug 24 13:11:58.324: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df container agnhost-container: +STEP: delete the pod 08/24/23 13:11:58.34 +Aug 24 13:11:58.362: INFO: Waiting for pod pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df to disappear +Aug 24 13:11:58.374: INFO: Pod pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df no longer exists +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 17:02:18.282: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] HostPort +Aug 24 13:11:58.374: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] HostPort +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] HostPort +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "hostport-5764" for this suite. 07/29/23 17:02:18.294 +STEP: Destroying namespace "configmap-8659" for this suite. 08/24/23 13:11:58.405 ------------------------------ -• [SLOW TEST] [23.595 seconds] -[sig-network] HostPort -test/e2e/network/common/framework.go:23 - validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] - test/e2e/network/hostport.go:63 +• [4.189 seconds] +[sig-storage] ConfigMap +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:57 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] HostPort + [BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:01:54.709 - Jul 29 17:01:54.709: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename hostport 07/29/23 17:01:54.713 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:01:54.746 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:01:54.749 - [BeforeEach] [sig-network] HostPort + STEP: Creating a kubernetes client 08/24/23 13:11:54.236 + Aug 24 13:11:54.236: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 13:11:54.238 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:54.261 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:54.265 + [BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] HostPort - test/e2e/network/hostport.go:49 - [It] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] - 
test/e2e/network/hostport.go:63 - STEP: Trying to create a pod(pod1) with hostport 54323 and hostIP 127.0.0.1 and expect scheduled 07/29/23 17:01:54.768 - Jul 29 17:01:54.782: INFO: Waiting up to 5m0s for pod "pod1" in namespace "hostport-5764" to be "running and ready" - Jul 29 17:01:54.788: INFO: Pod "pod1": Phase="Pending", Reason="", readiness=false. Elapsed: 6.391293ms - Jul 29 17:01:54.788: INFO: The phase of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:01:56.796: INFO: Pod "pod1": Phase="Running", Reason="", readiness=true. Elapsed: 2.013881131s - Jul 29 17:01:56.797: INFO: The phase of Pod pod1 is Running (Ready = true) - Jul 29 17:01:56.797: INFO: Pod "pod1" satisfied condition "running and ready" - STEP: Trying to create another pod(pod2) with hostport 54323 but hostIP 192.168.121.141 on the node which pod1 resides and expect scheduled 07/29/23 17:01:56.797 - Jul 29 17:01:56.810: INFO: Waiting up to 5m0s for pod "pod2" in namespace "hostport-5764" to be "running and ready" - Jul 29 17:01:56.820: INFO: Pod "pod2": Phase="Pending", Reason="", readiness=false. Elapsed: 8.919531ms - Jul 29 17:01:56.820: INFO: The phase of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:01:58.827: INFO: Pod "pod2": Phase="Running", Reason="", readiness=true. Elapsed: 2.016357153s - Jul 29 17:01:58.827: INFO: The phase of Pod pod2 is Running (Ready = true) - Jul 29 17:01:58.828: INFO: Pod "pod2" satisfied condition "running and ready" - STEP: Trying to create a third pod(pod3) with hostport 54323, hostIP 192.168.121.141 but use UDP protocol on the node which pod2 resides 07/29/23 17:01:58.828 - Jul 29 17:01:58.836: INFO: Waiting up to 5m0s for pod "pod3" in namespace "hostport-5764" to be "running and ready" - Jul 29 17:01:58.847: INFO: Pod "pod3": Phase="Pending", Reason="", readiness=false. Elapsed: 10.035054ms - Jul 29 17:01:58.847: INFO: The phase of Pod pod3 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:02:00.855: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 2.018433648s - Jul 29 17:02:00.855: INFO: The phase of Pod pod3 is Running (Ready = false) - Jul 29 17:02:02.856: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 4.019787923s - Jul 29 17:02:02.857: INFO: The phase of Pod pod3 is Running (Ready = false) - Jul 29 17:02:04.855: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 6.018105761s - Jul 29 17:02:04.855: INFO: The phase of Pod pod3 is Running (Ready = false) - Jul 29 17:02:06.854: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 8.0178423s - Jul 29 17:02:06.855: INFO: The phase of Pod pod3 is Running (Ready = false) - Jul 29 17:02:08.860: INFO: Pod "pod3": Phase="Running", Reason="", readiness=false. Elapsed: 10.022948496s - Jul 29 17:02:08.860: INFO: The phase of Pod pod3 is Running (Ready = false) - Jul 29 17:02:10.854: INFO: Pod "pod3": Phase="Running", Reason="", readiness=true. Elapsed: 12.017614887s - Jul 29 17:02:10.854: INFO: The phase of Pod pod3 is Running (Ready = true) - Jul 29 17:02:10.855: INFO: Pod "pod3" satisfied condition "running and ready" - Jul 29 17:02:10.862: INFO: Waiting up to 5m0s for pod "e2e-host-exec" in namespace "hostport-5764" to be "running and ready" - Jul 29 17:02:10.868: INFO: Pod "e2e-host-exec": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.9555ms - Jul 29 17:02:10.869: INFO: The phase of Pod e2e-host-exec is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:02:12.877: INFO: Pod "e2e-host-exec": Phase="Running", Reason="", readiness=true. Elapsed: 2.014935444s - Jul 29 17:02:12.878: INFO: The phase of Pod e2e-host-exec is Running (Ready = true) - Jul 29 17:02:12.878: INFO: Pod "e2e-host-exec" satisfied condition "running and ready" - STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54323 07/29/23 17:02:12.884 - Jul 29 17:02:12.884: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 192.168.121.141 http://127.0.0.1:54323/hostname] Namespace:hostport-5764 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 17:02:12.884: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 17:02:12.886: INFO: ExecWithOptions: Clientset creation - Jul 29 17:02:12.886: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-5764/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+--connect-timeout+5+--interface+192.168.121.141+http%3A%2F%2F127.0.0.1%3A54323%2Fhostname&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) - STEP: checking connectivity from pod e2e-host-exec to serverIP: 192.168.121.141, port: 54323 07/29/23 17:02:13.048 - Jul 29 17:02:13.048: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://192.168.121.141:54323/hostname] Namespace:hostport-5764 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 17:02:13.048: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 17:02:13.052: INFO: ExecWithOptions: Clientset creation - Jul 29 17:02:13.052: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-5764/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=curl+-g+--connect-timeout+5+http%3A%2F%2F192.168.121.141%3A54323%2Fhostname&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) - STEP: checking connectivity from pod e2e-host-exec to serverIP: 192.168.121.141, port: 54323 UDP 07/29/23 17:02:13.171 - Jul 29 17:02:13.172: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostname | nc -u -w 5 192.168.121.141 54323] Namespace:hostport-5764 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} - Jul 29 17:02:13.172: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - Jul 29 17:02:13.173: INFO: ExecWithOptions: Clientset creation - Jul 29 17:02:13.173: INFO: ExecWithOptions: execute(POST https://10.233.0.1:443/api/v1/namespaces/hostport-5764/pods/e2e-host-exec/exec?command=%2Fbin%2Fsh&command=-c&command=echo+hostname+%7C+nc+-u+-w+5+192.168.121.141+54323&container=e2e-host-exec&container=e2e-host-exec&stderr=true&stdout=true) - [AfterEach] [sig-network] HostPort + [It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:57 + STEP: Creating configMap with name configmap-test-volume-2db25835-d6ed-40cc-99a7-78e262dfdd69 08/24/23 13:11:54.269 + STEP: Creating a pod to test consume configMaps 08/24/23 13:11:54.277 + Aug 24 13:11:54.297: INFO: Waiting up to 5m0s for pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df" in namespace 
"configmap-8659" to be "Succeeded or Failed" + Aug 24 13:11:54.305: INFO: Pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df": Phase="Pending", Reason="", readiness=false. Elapsed: 8.295635ms + Aug 24 13:11:56.316: INFO: Pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019236898s + Aug 24 13:11:58.313: INFO: Pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01652131s + STEP: Saw pod success 08/24/23 13:11:58.313 + Aug 24 13:11:58.314: INFO: Pod "pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df" satisfied condition "Succeeded or Failed" + Aug 24 13:11:58.324: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df container agnhost-container: + STEP: delete the pod 08/24/23 13:11:58.34 + Aug 24 13:11:58.362: INFO: Waiting for pod pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df to disappear + Aug 24 13:11:58.374: INFO: Pod pod-configmaps-5f7b80c7-8877-4a54-b377-75917544b9df no longer exists + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 17:02:18.282: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] HostPort + Aug 24 13:11:58.374: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] HostPort + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] HostPort + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "hostport-5764" for this suite. 07/29/23 17:02:18.294 + STEP: Destroying namespace "configmap-8659" for this suite. 
08/24/23 13:11:58.405 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSS +SSSSSSSSSS ------------------------------ -[sig-network] EndpointSliceMirroring - should mirror a custom Endpoints resource through create update and delete [Conformance] - test/e2e/network/endpointslicemirroring.go:53 -[BeforeEach] [sig-network] EndpointSliceMirroring +[sig-node] Containers + should be able to override the image's default command and arguments [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:87 +[BeforeEach] [sig-node] Containers set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:02:18.308 -Jul 29 17:02:18.308: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename endpointslicemirroring 07/29/23 17:02:18.311 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:02:18.346 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:02:18.351 -[BeforeEach] [sig-network] EndpointSliceMirroring +STEP: Creating a kubernetes client 08/24/23 13:11:58.429 +Aug 24 13:11:58.429: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename containers 08/24/23 13:11:58.431 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:58.472 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:58.478 +[BeforeEach] [sig-node] Containers test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] EndpointSliceMirroring - test/e2e/network/endpointslicemirroring.go:41 -[It] should mirror a custom Endpoints resource through create update and delete [Conformance] - test/e2e/network/endpointslicemirroring.go:53 -STEP: mirroring a new custom Endpoint 07/29/23 17:02:18.374 -Jul 29 17:02:18.389: INFO: Waiting for at least 1 EndpointSlice to exist, got 0 -STEP: mirroring an update to a custom Endpoint 07/29/23 17:02:20.399 -Jul 29 17:02:20.413: INFO: Expected EndpointSlice to have 10.2.3.4 as address, got 10.1.2.3 -STEP: mirroring deletion of a custom Endpoint 07/29/23 17:02:22.421 -Jul 29 17:02:22.440: INFO: Waiting for 0 EndpointSlices to exist, got 1 -[AfterEach] [sig-network] EndpointSliceMirroring +[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:87 +STEP: Creating a pod to test override all 08/24/23 13:11:58.483 +Aug 24 13:11:58.498: INFO: Waiting up to 5m0s for pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590" in namespace "containers-609" to be "Succeeded or Failed" +Aug 24 13:11:58.510: INFO: Pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590": Phase="Pending", Reason="", readiness=false. Elapsed: 12.020836ms +Aug 24 13:12:00.522: INFO: Pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023599534s +Aug 24 13:12:02.534: INFO: Pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.035815319s +STEP: Saw pod success 08/24/23 13:12:02.534 +Aug 24 13:12:02.534: INFO: Pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590" satisfied condition "Succeeded or Failed" +Aug 24 13:12:02.543: INFO: Trying to get logs from node pe9deep4seen-3 pod client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590 container agnhost-container: +STEP: delete the pod 08/24/23 13:12:02.564 +Aug 24 13:12:02.586: INFO: Waiting for pod client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590 to disappear +Aug 24 13:12:02.593: INFO: Pod client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590 no longer exists +[AfterEach] [sig-node] Containers test/e2e/framework/node/init/init.go:32 -Jul 29 17:02:24.448: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] EndpointSliceMirroring +Aug 24 13:12:02.595: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Containers test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] EndpointSliceMirroring +[DeferCleanup (Each)] [sig-node] Containers dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] EndpointSliceMirroring +[DeferCleanup (Each)] [sig-node] Containers tear down framework | framework.go:193 -STEP: Destroying namespace "endpointslicemirroring-4293" for this suite. 07/29/23 17:02:24.46 +STEP: Destroying namespace "containers-609" for this suite. 08/24/23 13:12:02.605 ------------------------------ -• [SLOW TEST] [6.169 seconds] -[sig-network] EndpointSliceMirroring -test/e2e/network/common/framework.go:23 - should mirror a custom Endpoints resource through create update and delete [Conformance] - test/e2e/network/endpointslicemirroring.go:53 +• [4.192 seconds] +[sig-node] Containers +test/e2e/common/node/framework.go:23 + should be able to override the image's default command and arguments [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:87 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] EndpointSliceMirroring + [BeforeEach] [sig-node] Containers set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:02:18.308 - Jul 29 17:02:18.308: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename endpointslicemirroring 07/29/23 17:02:18.311 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:02:18.346 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:02:18.351 - [BeforeEach] [sig-network] EndpointSliceMirroring + STEP: Creating a kubernetes client 08/24/23 13:11:58.429 + Aug 24 13:11:58.429: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename containers 08/24/23 13:11:58.431 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:11:58.472 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:11:58.478 + [BeforeEach] [sig-node] Containers test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] EndpointSliceMirroring - test/e2e/network/endpointslicemirroring.go:41 - [It] should mirror a custom Endpoints resource through create update and delete [Conformance] - test/e2e/network/endpointslicemirroring.go:53 - STEP: mirroring a new custom Endpoint 07/29/23 17:02:18.374 - Jul 29 17:02:18.389: INFO: Waiting for at least 1 EndpointSlice to exist, got 0 - STEP: mirroring an update to a custom Endpoint 07/29/23 17:02:20.399 - Jul 29 
17:02:20.413: INFO: Expected EndpointSlice to have 10.2.3.4 as address, got 10.1.2.3 - STEP: mirroring deletion of a custom Endpoint 07/29/23 17:02:22.421 - Jul 29 17:02:22.440: INFO: Waiting for 0 EndpointSlices to exist, got 1 - [AfterEach] [sig-network] EndpointSliceMirroring + [It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] + test/e2e/common/node/containers.go:87 + STEP: Creating a pod to test override all 08/24/23 13:11:58.483 + Aug 24 13:11:58.498: INFO: Waiting up to 5m0s for pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590" in namespace "containers-609" to be "Succeeded or Failed" + Aug 24 13:11:58.510: INFO: Pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590": Phase="Pending", Reason="", readiness=false. Elapsed: 12.020836ms + Aug 24 13:12:00.522: INFO: Pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023599534s + Aug 24 13:12:02.534: INFO: Pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.035815319s + STEP: Saw pod success 08/24/23 13:12:02.534 + Aug 24 13:12:02.534: INFO: Pod "client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590" satisfied condition "Succeeded or Failed" + Aug 24 13:12:02.543: INFO: Trying to get logs from node pe9deep4seen-3 pod client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590 container agnhost-container: + STEP: delete the pod 08/24/23 13:12:02.564 + Aug 24 13:12:02.586: INFO: Waiting for pod client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590 to disappear + Aug 24 13:12:02.593: INFO: Pod client-containers-bfdf2d43-dfb8-465a-aa7f-1a86ac911590 no longer exists + [AfterEach] [sig-node] Containers test/e2e/framework/node/init/init.go:32 - Jul 29 17:02:24.448: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] EndpointSliceMirroring + Aug 24 13:12:02.595: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Containers test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] EndpointSliceMirroring + [DeferCleanup (Each)] [sig-node] Containers dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] EndpointSliceMirroring + [DeferCleanup (Each)] [sig-node] Containers tear down framework | framework.go:193 - STEP: Destroying namespace "endpointslicemirroring-4293" for this suite. 07/29/23 17:02:24.46 + STEP: Destroying namespace "containers-609" for this suite. 
08/24/23 13:12:02.605 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSS ------------------------------ -[sig-storage] Downward API volume - should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:84 -[BeforeEach] [sig-storage] Downward API volume +[sig-storage] Projected secret + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:67 +[BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:02:24.477 -Jul 29 17:02:24.477: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 17:02:24.479 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:02:24.511 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:02:24.516 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 -[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:84 -STEP: Creating a pod to test downward API volume plugin 07/29/23 17:02:24.522 -Jul 29 17:02:24.551: INFO: Waiting up to 5m0s for pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7" in namespace "downward-api-3543" to be "Succeeded or Failed" -Jul 29 17:02:24.563: INFO: Pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7": Phase="Pending", Reason="", readiness=false. Elapsed: 11.60256ms -Jul 29 17:02:26.572: INFO: Pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0211656s -Jul 29 17:02:28.573: INFO: Pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.021460394s -STEP: Saw pod success 07/29/23 17:02:28.573 -Jul 29 17:02:28.573: INFO: Pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7" satisfied condition "Succeeded or Failed" -Jul 29 17:02:28.591: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7 container client-container: -STEP: delete the pod 07/29/23 17:02:28.619 -Jul 29 17:02:28.643: INFO: Waiting for pod downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7 to disappear -Jul 29 17:02:28.649: INFO: Pod downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7 no longer exists -[AfterEach] [sig-storage] Downward API volume +STEP: Creating a kubernetes client 08/24/23 13:12:02.626 +Aug 24 13:12:02.627: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 13:12:02.63 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:12:02.657 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:12:02.668 +[BeforeEach] [sig-storage] Projected secret + test/e2e/framework/metrics/init/init.go:31 +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:67 +STEP: Creating projection with secret that has name projected-secret-test-bc7fdc65-38d3-4b69-b843-b00fb7fb9bf0 08/24/23 13:12:02.675 +STEP: Creating a pod to test consume secrets 08/24/23 13:12:02.705 +Aug 24 13:12:02.730: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4" in namespace "projected-7250" to be "Succeeded or Failed" +Aug 24 13:12:02.752: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4": Phase="Pending", Reason="", readiness=false. Elapsed: 21.690669ms +Aug 24 13:12:04.759: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.028823397s +Aug 24 13:12:06.762: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4": Phase="Pending", Reason="", readiness=false. Elapsed: 4.031269995s +Aug 24 13:12:08.759: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.02886189s +STEP: Saw pod success 08/24/23 13:12:08.759 +Aug 24 13:12:08.760: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4" satisfied condition "Succeeded or Failed" +Aug 24 13:12:08.764: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4 container projected-secret-volume-test: +STEP: delete the pod 08/24/23 13:12:08.776 +Aug 24 13:12:08.792: INFO: Waiting for pod pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4 to disappear +Aug 24 13:12:08.796: INFO: Pod pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4 no longer exists +[AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 -Jul 29 17:02:28.650: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Downward API volume +Aug 24 13:12:08.797: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Downward API volume +[DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-3543" for this suite. 07/29/23 17:02:28.662 +STEP: Destroying namespace "projected-7250" for this suite. 08/24/23 13:12:08.804 ------------------------------ -• [4.197 seconds] -[sig-storage] Downward API volume +• [SLOW TEST] [6.189 seconds] +[sig-storage] Projected secret test/e2e/common/storage/framework.go:23 - should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:84 + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:67 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Downward API volume + [BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:02:24.477 - Jul 29 17:02:24.477: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 17:02:24.479 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:02:24.511 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:02:24.516 - [BeforeEach] [sig-storage] Downward API volume + STEP: Creating a kubernetes client 08/24/23 13:12:02.626 + Aug 24 13:12:02.627: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 13:12:02.63 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:12:02.657 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:12:02.668 + [BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Downward API volume - test/e2e/common/storage/downwardapi_volume.go:44 - [It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/downwardapi_volume.go:84 - STEP: Creating a pod to test downward API volume plugin 07/29/23 17:02:24.522 - Jul 29 17:02:24.551: INFO: Waiting up to 5m0s for pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7" in namespace 
"downward-api-3543" to be "Succeeded or Failed" - Jul 29 17:02:24.563: INFO: Pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7": Phase="Pending", Reason="", readiness=false. Elapsed: 11.60256ms - Jul 29 17:02:26.572: INFO: Pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0211656s - Jul 29 17:02:28.573: INFO: Pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021460394s - STEP: Saw pod success 07/29/23 17:02:28.573 - Jul 29 17:02:28.573: INFO: Pod "downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7" satisfied condition "Succeeded or Failed" - Jul 29 17:02:28.591: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7 container client-container: - STEP: delete the pod 07/29/23 17:02:28.619 - Jul 29 17:02:28.643: INFO: Waiting for pod downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7 to disappear - Jul 29 17:02:28.649: INFO: Pod downwardapi-volume-76ac0318-c1b6-4ca4-aed6-c94367a47be7 no longer exists - [AfterEach] [sig-storage] Downward API volume + [It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:67 + STEP: Creating projection with secret that has name projected-secret-test-bc7fdc65-38d3-4b69-b843-b00fb7fb9bf0 08/24/23 13:12:02.675 + STEP: Creating a pod to test consume secrets 08/24/23 13:12:02.705 + Aug 24 13:12:02.730: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4" in namespace "projected-7250" to be "Succeeded or Failed" + Aug 24 13:12:02.752: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4": Phase="Pending", Reason="", readiness=false. Elapsed: 21.690669ms + Aug 24 13:12:04.759: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.028823397s + Aug 24 13:12:06.762: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4": Phase="Pending", Reason="", readiness=false. Elapsed: 4.031269995s + Aug 24 13:12:08.759: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.02886189s + STEP: Saw pod success 08/24/23 13:12:08.759 + Aug 24 13:12:08.760: INFO: Pod "pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4" satisfied condition "Succeeded or Failed" + Aug 24 13:12:08.764: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4 container projected-secret-volume-test: + STEP: delete the pod 08/24/23 13:12:08.776 + Aug 24 13:12:08.792: INFO: Waiting for pod pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4 to disappear + Aug 24 13:12:08.796: INFO: Pod pod-projected-secrets-3db75477-2594-45bf-9e68-408e6519bbe4 no longer exists + [AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 - Jul 29 17:02:28.650: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Downward API volume + Aug 24 13:12:08.797: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Downward API volume + [DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-3543" for this suite. 07/29/23 17:02:28.662 + STEP: Destroying namespace "projected-7250" for this suite. 08/24/23 13:12:08.804 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS +SSSSSSSSSSS ------------------------------ -[sig-storage] Subpath Atomic writer volumes - should support subpaths with configmap pod with mountPath of existing file [Conformance] - test/e2e/storage/subpath.go:80 -[BeforeEach] [sig-storage] Subpath +[sig-node] Pods + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:398 +[BeforeEach] [sig-node] Pods set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:02:28.68 -Jul 29 17:02:28.680: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename subpath 07/29/23 17:02:28.683 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:02:28.714 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:02:28.72 -[BeforeEach] [sig-storage] Subpath +STEP: Creating a kubernetes client 08/24/23 13:12:08.817 +Aug 24 13:12:08.818: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 13:12:08.82 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:12:08.849 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:12:08.854 +[BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 -STEP: Setting up data 07/29/23 17:02:28.724 -[It] should support subpaths with configmap pod with mountPath of existing file [Conformance] - test/e2e/storage/subpath.go:80 -STEP: Creating pod pod-subpath-test-configmap-sbxb 07/29/23 17:02:28.747 -STEP: Creating a pod to test atomic-volume-subpath 07/29/23 17:02:28.747 -Jul 29 17:02:28.769: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-sbxb" in namespace "subpath-4333" to be "Succeeded or Failed" -Jul 29 17:02:28.782: INFO: Pod "pod-subpath-test-configmap-sbxb": 
Phase="Pending", Reason="", readiness=false. Elapsed: 12.582853ms -Jul 29 17:02:30.794: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 2.025120627s -Jul 29 17:02:32.790: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 4.020834089s -Jul 29 17:02:34.790: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 6.020614647s -Jul 29 17:02:36.792: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 8.022540495s -Jul 29 17:02:38.791: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 10.021582869s -Jul 29 17:02:40.791: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 12.021682994s -Jul 29 17:02:42.790: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 14.020356587s -Jul 29 17:02:44.791: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 16.021693242s -Jul 29 17:02:46.797: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 18.027224123s -Jul 29 17:02:48.790: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 20.020492107s -Jul 29 17:02:50.794: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=false. Elapsed: 22.024705879s -Jul 29 17:02:52.799: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.029236547s -STEP: Saw pod success 07/29/23 17:02:52.799 -Jul 29 17:02:52.800: INFO: Pod "pod-subpath-test-configmap-sbxb" satisfied condition "Succeeded or Failed" -Jul 29 17:02:52.810: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-configmap-sbxb container test-container-subpath-configmap-sbxb: -STEP: delete the pod 07/29/23 17:02:52.832 -Jul 29 17:02:52.864: INFO: Waiting for pod pod-subpath-test-configmap-sbxb to disappear -Jul 29 17:02:52.871: INFO: Pod pod-subpath-test-configmap-sbxb no longer exists -STEP: Deleting pod pod-subpath-test-configmap-sbxb 07/29/23 17:02:52.872 -Jul 29 17:02:52.872: INFO: Deleting pod "pod-subpath-test-configmap-sbxb" in namespace "subpath-4333" -[AfterEach] [sig-storage] Subpath +[BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 +[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:398 +STEP: creating the pod 08/24/23 13:12:08.859 +STEP: submitting the pod to kubernetes 08/24/23 13:12:08.86 +Aug 24 13:12:08.877: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" in namespace "pods-5216" to be "running and ready" +Aug 24 13:12:08.893: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Pending", Reason="", readiness=false. Elapsed: 15.433896ms +Aug 24 13:12:08.893: INFO: The phase of Pod pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 13:12:10.901: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.023527006s +Aug 24 13:12:10.901: INFO: The phase of Pod pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640 is Running (Ready = true) +Aug 24 13:12:10.901: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" satisfied condition "running and ready" +STEP: verifying the pod is in kubernetes 08/24/23 13:12:10.907 +STEP: updating the pod 08/24/23 13:12:10.913 +Aug 24 13:12:11.435: INFO: Successfully updated pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" +Aug 24 13:12:11.436: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" in namespace "pods-5216" to be "terminated with reason DeadlineExceeded" +Aug 24 13:12:11.442: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Running", Reason="", readiness=true. Elapsed: 6.247469ms +Aug 24 13:12:13.458: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Running", Reason="", readiness=true. Elapsed: 2.021986185s +Aug 24 13:12:15.449: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Running", Reason="", readiness=false. Elapsed: 4.013159463s +Aug 24 13:12:17.453: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 6.017125746s +Aug 24 13:12:17.453: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" satisfied condition "terminated with reason DeadlineExceeded" +[AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 -Jul 29 17:02:52.879: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Subpath +Aug 24 13:12:17.453: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Subpath +[DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 -STEP: Destroying namespace "subpath-4333" for this suite. 07/29/23 17:02:52.892 +STEP: Destroying namespace "pods-5216" for this suite. 
08/24/23 13:12:17.462 ------------------------------ -• [SLOW TEST] [24.226 seconds] -[sig-storage] Subpath -test/e2e/storage/utils/framework.go:23 - Atomic writer volumes - test/e2e/storage/subpath.go:36 - should support subpaths with configmap pod with mountPath of existing file [Conformance] - test/e2e/storage/subpath.go:80 +• [SLOW TEST] [8.655 seconds] +[sig-node] Pods +test/e2e/common/node/framework.go:23 + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:398 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Subpath + [BeforeEach] [sig-node] Pods set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:02:28.68 - Jul 29 17:02:28.680: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename subpath 07/29/23 17:02:28.683 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:02:28.714 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:02:28.72 - [BeforeEach] [sig-storage] Subpath + STEP: Creating a kubernetes client 08/24/23 13:12:08.817 + Aug 24 13:12:08.818: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 13:12:08.82 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:12:08.849 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:12:08.854 + [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] Atomic writer volumes - test/e2e/storage/subpath.go:40 - STEP: Setting up data 07/29/23 17:02:28.724 - [It] should support subpaths with configmap pod with mountPath of existing file [Conformance] - test/e2e/storage/subpath.go:80 - STEP: Creating pod pod-subpath-test-configmap-sbxb 07/29/23 17:02:28.747 - STEP: Creating a pod to test atomic-volume-subpath 07/29/23 17:02:28.747 - Jul 29 17:02:28.769: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-sbxb" in namespace "subpath-4333" to be "Succeeded or Failed" - Jul 29 17:02:28.782: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Pending", Reason="", readiness=false. Elapsed: 12.582853ms - Jul 29 17:02:30.794: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 2.025120627s - Jul 29 17:02:32.790: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 4.020834089s - Jul 29 17:02:34.790: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 6.020614647s - Jul 29 17:02:36.792: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 8.022540495s - Jul 29 17:02:38.791: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 10.021582869s - Jul 29 17:02:40.791: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 12.021682994s - Jul 29 17:02:42.790: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 14.020356587s - Jul 29 17:02:44.791: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 16.021693242s - Jul 29 17:02:46.797: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. 
Elapsed: 18.027224123s - Jul 29 17:02:48.790: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=true. Elapsed: 20.020492107s - Jul 29 17:02:50.794: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Running", Reason="", readiness=false. Elapsed: 22.024705879s - Jul 29 17:02:52.799: INFO: Pod "pod-subpath-test-configmap-sbxb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.029236547s - STEP: Saw pod success 07/29/23 17:02:52.799 - Jul 29 17:02:52.800: INFO: Pod "pod-subpath-test-configmap-sbxb" satisfied condition "Succeeded or Failed" - Jul 29 17:02:52.810: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-subpath-test-configmap-sbxb container test-container-subpath-configmap-sbxb: - STEP: delete the pod 07/29/23 17:02:52.832 - Jul 29 17:02:52.864: INFO: Waiting for pod pod-subpath-test-configmap-sbxb to disappear - Jul 29 17:02:52.871: INFO: Pod pod-subpath-test-configmap-sbxb no longer exists - STEP: Deleting pod pod-subpath-test-configmap-sbxb 07/29/23 17:02:52.872 - Jul 29 17:02:52.872: INFO: Deleting pod "pod-subpath-test-configmap-sbxb" in namespace "subpath-4333" - [AfterEach] [sig-storage] Subpath + [BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 + [It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:398 + STEP: creating the pod 08/24/23 13:12:08.859 + STEP: submitting the pod to kubernetes 08/24/23 13:12:08.86 + Aug 24 13:12:08.877: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" in namespace "pods-5216" to be "running and ready" + Aug 24 13:12:08.893: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Pending", Reason="", readiness=false. Elapsed: 15.433896ms + Aug 24 13:12:08.893: INFO: The phase of Pod pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 13:12:10.901: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Running", Reason="", readiness=true. Elapsed: 2.023527006s + Aug 24 13:12:10.901: INFO: The phase of Pod pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640 is Running (Ready = true) + Aug 24 13:12:10.901: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" satisfied condition "running and ready" + STEP: verifying the pod is in kubernetes 08/24/23 13:12:10.907 + STEP: updating the pod 08/24/23 13:12:10.913 + Aug 24 13:12:11.435: INFO: Successfully updated pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" + Aug 24 13:12:11.436: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" in namespace "pods-5216" to be "terminated with reason DeadlineExceeded" + Aug 24 13:12:11.442: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Running", Reason="", readiness=true. Elapsed: 6.247469ms + Aug 24 13:12:13.458: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Running", Reason="", readiness=true. Elapsed: 2.021986185s + Aug 24 13:12:15.449: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Running", Reason="", readiness=false. 
Elapsed: 4.013159463s + Aug 24 13:12:17.453: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 6.017125746s + Aug 24 13:12:17.453: INFO: Pod "pod-update-activedeadlineseconds-f57a9310-f0fc-4494-ab73-34478f1de640" satisfied condition "terminated with reason DeadlineExceeded" + [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 - Jul 29 17:02:52.879: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Subpath + Aug 24 13:12:17.453: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Subpath + [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 - STEP: Destroying namespace "subpath-4333" for this suite. 07/29/23 17:02:52.892 + STEP: Destroying namespace "pods-5216" for this suite. 08/24/23 13:12:17.462 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-apps] Deployment - deployment should support rollover [Conformance] - test/e2e/apps/deployment.go:132 -[BeforeEach] [sig-apps] Deployment +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should perform rolling updates and roll backs of template modifications [Conformance] + test/e2e/apps/statefulset.go:306 +[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:02:52.912 -Jul 29 17:02:52.912: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename deployment 07/29/23 17:02:52.915 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:02:52.949 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:02:52.955 -[BeforeEach] [sig-apps] Deployment +STEP: Creating a kubernetes client 08/24/23 13:12:17.476 +Aug 24 13:12:17.476: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename statefulset 08/24/23 13:12:17.478 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:12:17.502 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:12:17.506 +[BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 -[It] deployment should support rollover [Conformance] - test/e2e/apps/deployment.go:132 -Jul 29 17:02:52.979: INFO: Pod name rollover-pod: Found 0 pods out of 1 -Jul 29 17:02:58.012: INFO: Pod name rollover-pod: Found 1 pods out of 1 -STEP: ensuring each pod is running 07/29/23 17:02:58.013 -Jul 29 17:02:58.017: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready -Jul 29 17:03:00.033: INFO: Creating deployment "test-rollover-deployment" -Jul 29 17:03:00.053: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations -Jul 29 17:03:02.071: INFO: Check revision of new replica set for deployment "test-rollover-deployment" -Jul 29 17:03:02.084: INFO: Ensure that both replica sets have 1 created replica -Jul 29 17:03:02.097: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new 
image update -Jul 29 17:03:02.128: INFO: Updating deployment test-rollover-deployment -Jul 29 17:03:02.128: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller -Jul 29 17:03:04.143: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 -Jul 29 17:03:04.155: INFO: Make sure deployment "test-rollover-deployment" is complete -Jul 29 17:03:04.173: INFO: all replica sets need to contain the pod-template-hash label -Jul 29 17:03:04.173: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 17:03:06.194: INFO: all replica sets need to contain the pod-template-hash label -Jul 29 17:03:06.194: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 17:03:08.190: INFO: all replica sets need to contain the pod-template-hash label -Jul 29 17:03:08.190: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 17:03:10.190: INFO: all replica sets need to contain the pod-template-hash label -Jul 29 17:03:10.190: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", 
Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 17:03:12.187: INFO: all replica sets need to contain the pod-template-hash label -Jul 29 17:03:12.187: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} -Jul 29 17:03:14.194: INFO: -Jul 29 17:03:14.195: INFO: Ensure that both old replica sets have no replicas -[AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 -Jul 29 17:03:14.220: INFO: Deployment "test-rollover-deployment": -&Deployment{ObjectMeta:{test-rollover-deployment deployment-583 1e830c24-5d96-4d1c-b763-81ff3c299e56 38557 2 2023-07-29 17:03:00 +0000 UTC map[name:rollover-pod] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:minReadySeconds":{},"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:13 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod] map[] [] [] []} 
{[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003c82768 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-07-29 17:03:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:00 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rollover-deployment-6c6df9974f" has successfully progressed.,LastUpdateTime:2023-07-29 17:03:13 +0000 UTC,LastTransitionTime:2023-07-29 17:03:00 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - -Jul 29 17:03:14.226: INFO: New ReplicaSet "test-rollover-deployment-6c6df9974f" of Deployment "test-rollover-deployment": -&ReplicaSet{ObjectMeta:{test-rollover-deployment-6c6df9974f deployment-583 39ac8dd2-3689-4629-baae-292693ab0013 38546 2 2023-07-29 17:03:02 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-rollover-deployment 1e830c24-5d96-4d1c-b763-81ff3c299e56 0xc003fc8697 0xc003fc8698}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1e830c24-5d96-4d1c-b763-81ff3c299e56\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:13 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6c6df9974f,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod 
pod-template-hash:6c6df9974f] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003fc8748 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} -Jul 29 17:03:14.226: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": -Jul 29 17:03:14.227: INFO: &ReplicaSet{ObjectMeta:{test-rollover-controller deployment-583 e60e2cdc-1b3f-487f-a6e9-9193d20883a0 38556 2 2023-07-29 17:02:52 +0000 UTC map[name:rollover-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2] [{apps/v1 Deployment test-rollover-deployment 1e830c24-5d96-4d1c-b763-81ff3c299e56 0xc003fc8567 0xc003fc8568}] [] [{e2e.test Update apps/v1 2023-07-29 17:02:52 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:13 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1e830c24-5d96-4d1c-b763-81ff3c299e56\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:13 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc003fc8628 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Jul 29 17:03:14.227: INFO: &ReplicaSet{ObjectMeta:{test-rollover-deployment-768dcbc65b deployment-583 2c5cfdb3-1aaa-4313-bb21-5e75034f9f8e 38513 2 2023-07-29 17:03:00 +0000 
UTC map[name:rollover-pod pod-template-hash:768dcbc65b] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-rollover-deployment 1e830c24-5d96-4d1c-b763-81ff3c299e56 0xc003fc87b7 0xc003fc87b8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1e830c24-5d96-4d1c-b763-81ff3c299e56\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"redis-slave\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 768dcbc65b,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:768dcbc65b] map[] [] [] []} {[] [] [{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003fc8868 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} -Jul 29 17:03:14.233: INFO: Pod "test-rollover-deployment-6c6df9974f-7xvb5" is available: -&Pod{ObjectMeta:{test-rollover-deployment-6c6df9974f-7xvb5 test-rollover-deployment-6c6df9974f- deployment-583 855dd3cd-8153-4b16-9762-5d051f8a3f3d 38521 0 2023-07-29 17:03:02 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[] [{apps/v1 ReplicaSet test-rollover-deployment-6c6df9974f 39ac8dd2-3689-4629-baae-292693ab0013 0xc003c82b17 0xc003c82b18}] [] [{kube-controller-manager Update v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"39ac8dd2-3689-4629-baae-292693ab0013\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:03:03 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.220\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-kh9p5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kh9p5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,Node
Name:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:03 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:03 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.220,StartTime:2023-07-29 17:03:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:03:03 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,ImageID:registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e,ContainerID:cri-o://8e2c5f5c7c06e3faec530f5351b66329516b4c54bf3cd6ce586bb308f3b0ab55,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.220,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +[BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 +STEP: Creating service test in namespace statefulset-5572 08/24/23 13:12:17.511 +[It] should perform rolling updates and roll backs of template modifications [Conformance] + test/e2e/apps/statefulset.go:306 +STEP: Creating a new StatefulSet 08/24/23 13:12:17.521 +Aug 24 13:12:17.543: INFO: Found 0 stateful pods, waiting for 3 +Aug 24 13:12:27.556: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 13:12:27.557: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 13:12:27.557: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +Aug 24 
13:12:27.573: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-5572 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 13:12:27.896: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 13:12:27.896: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 13:12:27.896: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +STEP: Updating StatefulSet template: update image from registry.k8s.io/e2e-test-images/httpd:2.4.38-4 to registry.k8s.io/e2e-test-images/httpd:2.4.39-4 08/24/23 13:12:37.935 +Aug 24 13:12:37.975: INFO: Updating stateful set ss2 +STEP: Creating a new revision 08/24/23 13:12:37.975 +STEP: Updating Pods in reverse ordinal order 08/24/23 13:12:48.011 +Aug 24 13:12:48.022: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-5572 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Aug 24 13:12:48.290: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Aug 24 13:12:48.290: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Aug 24 13:12:48.290: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +STEP: Rolling back to a previous revision 08/24/23 13:12:58.333 +Aug 24 13:12:58.333: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-5572 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Aug 24 13:12:58.649: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Aug 24 13:12:58.649: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Aug 24 13:12:58.649: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Aug 24 13:13:08.707: INFO: Updating stateful set ss2 +STEP: Rolling back update in reverse ordinal order 08/24/23 13:13:18.742 +Aug 24 13:13:18.751: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-5572 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Aug 24 13:13:19.065: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Aug 24 13:13:19.065: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Aug 24 13:13:19.065: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 +Aug 24 13:13:29.110: INFO: Deleting all statefulset in ns statefulset-5572 +Aug 24 13:13:29.114: INFO: Scaling statefulset ss2 to 0 +Aug 24 13:13:39.147: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 13:13:39.158: INFO: Deleting statefulset ss2 +[AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 -Jul 29 17:03:14.234: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Deployment +Aug 24 13:13:39.207: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] StatefulSet 
test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 -STEP: Destroying namespace "deployment-583" for this suite. 07/29/23 17:03:14.242 +STEP: Destroying namespace "statefulset-5572" for this suite. 08/24/23 13:13:39.229 ------------------------------ -• [SLOW TEST] [21.342 seconds] -[sig-apps] Deployment +• [SLOW TEST] [81.776 seconds] +[sig-apps] StatefulSet test/e2e/apps/framework.go:23 - deployment should support rollover [Conformance] - test/e2e/apps/deployment.go:132 + Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:103 + should perform rolling updates and roll backs of template modifications [Conformance] + test/e2e/apps/statefulset.go:306 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Deployment + [BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:02:52.912 - Jul 29 17:02:52.912: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename deployment 07/29/23 17:02:52.915 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:02:52.949 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:02:52.955 - [BeforeEach] [sig-apps] Deployment + STEP: Creating a kubernetes client 08/24/23 13:12:17.476 + Aug 24 13:12:17.476: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename statefulset 08/24/23 13:12:17.478 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:12:17.502 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:12:17.506 + [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 - [It] deployment should support rollover [Conformance] - test/e2e/apps/deployment.go:132 - Jul 29 17:02:52.979: INFO: Pod name rollover-pod: Found 0 pods out of 1 - Jul 29 17:02:58.012: INFO: Pod name rollover-pod: Found 1 pods out of 1 - STEP: ensuring each pod is running 07/29/23 17:02:58.013 - Jul 29 17:02:58.017: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready - Jul 29 17:03:00.033: INFO: Creating deployment "test-rollover-deployment" - Jul 29 17:03:00.053: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations - Jul 29 17:03:02.071: INFO: Check revision of new replica set for deployment "test-rollover-deployment" - Jul 29 17:03:02.084: INFO: Ensure that both replica sets have 1 created replica - Jul 29 17:03:02.097: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update - Jul 29 17:03:02.128: INFO: Updating deployment test-rollover-deployment - Jul 29 17:03:02.128: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller - Jul 29 17:03:04.143: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 - Jul 29 17:03:04.155: INFO: Make sure deployment "test-rollover-deployment" is complete - Jul 29 17:03:04.173: INFO: all replica sets need to contain the pod-template-hash label - Jul 29 17:03:04.173: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, 
UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 17:03:06.194: INFO: all replica sets need to contain the pod-template-hash label - Jul 29 17:03:06.194: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 17:03:08.190: INFO: all replica sets need to contain the pod-template-hash label - Jul 29 17:03:08.190: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 17:03:10.190: INFO: all replica sets need to contain the pod-template-hash label - Jul 29 17:03:10.190: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, 
CollisionCount:(*int32)(nil)} - Jul 29 17:03:12.187: INFO: all replica sets need to contain the pod-template-hash label - Jul 29 17:03:12.187: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 3, 3, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 3, 0, 0, time.Local), Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6c6df9974f\" is progressing."}}, CollisionCount:(*int32)(nil)} - Jul 29 17:03:14.194: INFO: - Jul 29 17:03:14.195: INFO: Ensure that both old replica sets have no replicas - [AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 - Jul 29 17:03:14.220: INFO: Deployment "test-rollover-deployment": - &Deployment{ObjectMeta:{test-rollover-deployment deployment-583 1e830c24-5d96-4d1c-b763-81ff3c299e56 38557 2 2023-07-29 17:03:00 +0000 UTC map[name:rollover-pod] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:minReadySeconds":{},"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:13 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003c82768 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-07-29 17:03:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:00 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rollover-deployment-6c6df9974f" has successfully progressed.,LastUpdateTime:2023-07-29 17:03:13 +0000 UTC,LastTransitionTime:2023-07-29 17:03:00 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - - Jul 29 17:03:14.226: INFO: New ReplicaSet "test-rollover-deployment-6c6df9974f" of Deployment "test-rollover-deployment": - &ReplicaSet{ObjectMeta:{test-rollover-deployment-6c6df9974f deployment-583 39ac8dd2-3689-4629-baae-292693ab0013 38546 2 2023-07-29 17:03:02 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-rollover-deployment 1e830c24-5d96-4d1c-b763-81ff3c299e56 0xc003fc8697 0xc003fc8698}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1e830c24-5d96-4d1c-b763-81ff3c299e56\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:13 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6c6df9974f,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[] [] [] []} {[] [] [{agnhost registry.k8s.io/e2e-test-images/agnhost:2.43 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003fc8748 ClusterFirst 
map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} - Jul 29 17:03:14.226: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": - Jul 29 17:03:14.227: INFO: &ReplicaSet{ObjectMeta:{test-rollover-controller deployment-583 e60e2cdc-1b3f-487f-a6e9-9193d20883a0 38556 2 2023-07-29 17:02:52 +0000 UTC map[name:rollover-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2] [{apps/v1 Deployment test-rollover-deployment 1e830c24-5d96-4d1c-b763-81ff3c299e56 0xc003fc8567 0xc003fc8568}] [] [{e2e.test Update apps/v1 2023-07-29 17:02:52 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:13 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1e830c24-5d96-4d1c-b763-81ff3c299e56\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:13 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc003fc8628 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - Jul 29 17:03:14.227: INFO: &ReplicaSet{ObjectMeta:{test-rollover-deployment-768dcbc65b deployment-583 2c5cfdb3-1aaa-4313-bb21-5e75034f9f8e 38513 2 2023-07-29 17:03:00 +0000 UTC map[name:rollover-pod pod-template-hash:768dcbc65b] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-rollover-deployment 1e830c24-5d96-4d1c-b763-81ff3c299e56 0xc003fc87b7 0xc003fc87b8}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"1e830c24-5d96-4d1c-b763-81ff3c299e56\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"redis-slave\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 768dcbc65b,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:768dcbc65b] map[] [] [] []} {[] [] [{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003fc8868 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} - Jul 29 17:03:14.233: INFO: Pod "test-rollover-deployment-6c6df9974f-7xvb5" is available: - &Pod{ObjectMeta:{test-rollover-deployment-6c6df9974f-7xvb5 test-rollover-deployment-6c6df9974f- deployment-583 855dd3cd-8153-4b16-9762-5d051f8a3f3d 38521 0 2023-07-29 17:03:02 +0000 UTC map[name:rollover-pod pod-template-hash:6c6df9974f] map[] [{apps/v1 ReplicaSet test-rollover-deployment-6c6df9974f 39ac8dd2-3689-4629-baae-292693ab0013 0xc003c82b17 0xc003c82b18}] [] [{kube-controller-manager Update v1 2023-07-29 17:03:02 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"39ac8dd2-3689-4629-baae-292693ab0013\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:03:03 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.220\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-kh9p5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kh9p5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{K
ey:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:02 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:03 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:03 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:03:02 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.220,StartTime:2023-07-29 17:03:02 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:03:03 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/agnhost:2.43,ImageID:registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e,ContainerID:cri-o://8e2c5f5c7c06e3faec530f5351b66329516b4c54bf3cd6ce586bb308f3b0ab55,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.220,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - [AfterEach] [sig-apps] Deployment + [BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 + [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 + STEP: Creating service test in namespace statefulset-5572 08/24/23 13:12:17.511 + [It] should perform rolling updates and roll backs of template modifications [Conformance] + test/e2e/apps/statefulset.go:306 + STEP: Creating a new StatefulSet 08/24/23 13:12:17.521 + Aug 24 13:12:17.543: INFO: Found 0 stateful pods, waiting for 3 + Aug 24 13:12:27.556: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 13:12:27.557: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 13:12:27.557: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true + Aug 24 13:12:27.573: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-5572 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 13:12:27.896: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 13:12:27.896: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 13:12:27.896: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + STEP: Updating 
StatefulSet template: update image from registry.k8s.io/e2e-test-images/httpd:2.4.38-4 to registry.k8s.io/e2e-test-images/httpd:2.4.39-4 08/24/23 13:12:37.935 + Aug 24 13:12:37.975: INFO: Updating stateful set ss2 + STEP: Creating a new revision 08/24/23 13:12:37.975 + STEP: Updating Pods in reverse ordinal order 08/24/23 13:12:48.011 + Aug 24 13:12:48.022: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-5572 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' + Aug 24 13:12:48.290: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" + Aug 24 13:12:48.290: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" + Aug 24 13:12:48.290: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + + STEP: Rolling back to a previous revision 08/24/23 13:12:58.333 + Aug 24 13:12:58.333: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-5572 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' + Aug 24 13:12:58.649: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" + Aug 24 13:12:58.649: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" + Aug 24 13:12:58.649: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + + Aug 24 13:13:08.707: INFO: Updating stateful set ss2 + STEP: Rolling back update in reverse ordinal order 08/24/23 13:13:18.742 + Aug 24 13:13:18.751: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=statefulset-5572 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' + Aug 24 13:13:19.065: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" + Aug 24 13:13:19.065: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" + Aug 24 13:13:19.065: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + + [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 + Aug 24 13:13:29.110: INFO: Deleting all statefulset in ns statefulset-5572 + Aug 24 13:13:29.114: INFO: Scaling statefulset ss2 to 0 + Aug 24 13:13:39.147: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 13:13:39.158: INFO: Deleting statefulset ss2 + [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 - Jul 29 17:03:14.234: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Deployment + Aug 24 13:13:39.207: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 - STEP: Destroying namespace "deployment-583" for this suite. 07/29/23 17:03:14.242 + STEP: Destroying namespace "statefulset-5572" for this suite. 
08/24/23 13:13:39.229 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSS ------------------------------ -[sig-node] Pods - should contain environment variables for services [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:444 -[BeforeEach] [sig-node] Pods +[sig-auth] ServiceAccounts + should run through the lifecycle of a ServiceAccount [Conformance] + test/e2e/auth/service_accounts.go:649 +[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:03:14.27 -Jul 29 17:03:14.270: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 17:03:14.275 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:03:14.313 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:03:14.317 -[BeforeEach] [sig-node] Pods +STEP: Creating a kubernetes client 08/24/23 13:13:39.255 +Aug 24 13:13:39.255: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename svcaccounts 08/24/23 13:13:39.258 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:13:39.284 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:13:39.289 +[BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should contain environment variables for services [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:444 -Jul 29 17:03:14.339: INFO: Waiting up to 5m0s for pod "server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4" in namespace "pods-7318" to be "running and ready" -Jul 29 17:03:14.346: INFO: Pod "server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4": Phase="Pending", Reason="", readiness=false. Elapsed: 7.196695ms -Jul 29 17:03:14.347: INFO: The phase of Pod server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:03:16.356: INFO: Pod "server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4": Phase="Running", Reason="", readiness=true. Elapsed: 2.016645565s -Jul 29 17:03:16.356: INFO: The phase of Pod server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4 is Running (Ready = true) -Jul 29 17:03:16.356: INFO: Pod "server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4" satisfied condition "running and ready" -Jul 29 17:03:16.399: INFO: Waiting up to 5m0s for pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303" in namespace "pods-7318" to be "Succeeded or Failed" -Jul 29 17:03:16.405: INFO: Pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303": Phase="Pending", Reason="", readiness=false. Elapsed: 5.415646ms -Jul 29 17:03:18.414: INFO: Pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01462465s -Jul 29 17:03:20.414: INFO: Pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.015069874s -STEP: Saw pod success 07/29/23 17:03:20.414 -Jul 29 17:03:20.415: INFO: Pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303" satisfied condition "Succeeded or Failed" -Jul 29 17:03:20.422: INFO: Trying to get logs from node wetuj3nuajog-3 pod client-envvars-873dff84-0096-4360-bc5b-46811290c303 container env3cont: -STEP: delete the pod 07/29/23 17:03:20.434 -Jul 29 17:03:20.456: INFO: Waiting for pod client-envvars-873dff84-0096-4360-bc5b-46811290c303 to disappear -Jul 29 17:03:20.462: INFO: Pod client-envvars-873dff84-0096-4360-bc5b-46811290c303 no longer exists -[AfterEach] [sig-node] Pods +[It] should run through the lifecycle of a ServiceAccount [Conformance] + test/e2e/auth/service_accounts.go:649 +STEP: creating a ServiceAccount 08/24/23 13:13:39.298 +STEP: watching for the ServiceAccount to be added 08/24/23 13:13:39.312 +STEP: patching the ServiceAccount 08/24/23 13:13:39.314 +STEP: finding ServiceAccount in list of all ServiceAccounts (by LabelSelector) 08/24/23 13:13:39.323 +STEP: deleting the ServiceAccount 08/24/23 13:13:39.33 +[AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 -Jul 29 17:03:20.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods +Aug 24 13:13:39.352: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 -STEP: Destroying namespace "pods-7318" for this suite. 07/29/23 17:03:20.471 +STEP: Destroying namespace "svcaccounts-4877" for this suite. 
08/24/23 13:13:39.363 ------------------------------ -• [SLOW TEST] [6.211 seconds] -[sig-node] Pods -test/e2e/common/node/framework.go:23 - should contain environment variables for services [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:444 +• [0.118 seconds] +[sig-auth] ServiceAccounts +test/e2e/auth/framework.go:23 + should run through the lifecycle of a ServiceAccount [Conformance] + test/e2e/auth/service_accounts.go:649 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods + [BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:03:14.27 - Jul 29 17:03:14.270: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 17:03:14.275 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:03:14.313 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:03:14.317 - [BeforeEach] [sig-node] Pods + STEP: Creating a kubernetes client 08/24/23 13:13:39.255 + Aug 24 13:13:39.255: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename svcaccounts 08/24/23 13:13:39.258 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:13:39.284 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:13:39.289 + [BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should contain environment variables for services [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:444 - Jul 29 17:03:14.339: INFO: Waiting up to 5m0s for pod "server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4" in namespace "pods-7318" to be "running and ready" - Jul 29 17:03:14.346: INFO: Pod "server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4": Phase="Pending", Reason="", readiness=false. Elapsed: 7.196695ms - Jul 29 17:03:14.347: INFO: The phase of Pod server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:03:16.356: INFO: Pod "server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4": Phase="Running", Reason="", readiness=true. Elapsed: 2.016645565s - Jul 29 17:03:16.356: INFO: The phase of Pod server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4 is Running (Ready = true) - Jul 29 17:03:16.356: INFO: Pod "server-envvars-c76665ba-0bbe-449e-ada3-b0dfc9a002a4" satisfied condition "running and ready" - Jul 29 17:03:16.399: INFO: Waiting up to 5m0s for pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303" in namespace "pods-7318" to be "Succeeded or Failed" - Jul 29 17:03:16.405: INFO: Pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303": Phase="Pending", Reason="", readiness=false. Elapsed: 5.415646ms - Jul 29 17:03:18.414: INFO: Pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01462465s - Jul 29 17:03:20.414: INFO: Pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.015069874s - STEP: Saw pod success 07/29/23 17:03:20.414 - Jul 29 17:03:20.415: INFO: Pod "client-envvars-873dff84-0096-4360-bc5b-46811290c303" satisfied condition "Succeeded or Failed" - Jul 29 17:03:20.422: INFO: Trying to get logs from node wetuj3nuajog-3 pod client-envvars-873dff84-0096-4360-bc5b-46811290c303 container env3cont: - STEP: delete the pod 07/29/23 17:03:20.434 - Jul 29 17:03:20.456: INFO: Waiting for pod client-envvars-873dff84-0096-4360-bc5b-46811290c303 to disappear - Jul 29 17:03:20.462: INFO: Pod client-envvars-873dff84-0096-4360-bc5b-46811290c303 no longer exists - [AfterEach] [sig-node] Pods + [It] should run through the lifecycle of a ServiceAccount [Conformance] + test/e2e/auth/service_accounts.go:649 + STEP: creating a ServiceAccount 08/24/23 13:13:39.298 + STEP: watching for the ServiceAccount to be added 08/24/23 13:13:39.312 + STEP: patching the ServiceAccount 08/24/23 13:13:39.314 + STEP: finding ServiceAccount in list of all ServiceAccounts (by LabelSelector) 08/24/23 13:13:39.323 + STEP: deleting the ServiceAccount 08/24/23 13:13:39.33 + [AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 - Jul 29 17:03:20.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods + Aug 24 13:13:39.352: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 - STEP: Destroying namespace "pods-7318" for this suite. 07/29/23 17:03:20.471 + STEP: Destroying namespace "svcaccounts-4877" for this suite. 
08/24/23 13:13:39.363 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-scheduling] SchedulerPreemption [Serial] - validates basic preemption works [Conformance] - test/e2e/scheduling/preemption.go:130 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for multiple CRDs of different groups [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:276 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:03:20.488 -Jul 29 17:03:20.488: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename sched-preemption 07/29/23 17:03:20.49 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:03:20.521 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:03:20.525 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] +STEP: Creating a kubernetes client 08/24/23 13:13:39.381 +Aug 24 13:13:39.381: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 13:13:39.383 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:13:39.428 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:13:39.433 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:97 -Jul 29 17:03:20.555: INFO: Waiting up to 1m0s for all nodes to be ready -Jul 29 17:04:20.615: INFO: Waiting for terminating namespaces to be deleted... -[It] validates basic preemption works [Conformance] - test/e2e/scheduling/preemption.go:130 -STEP: Create pods that use 4/5 of node resources. 07/29/23 17:04:20.621 -Jul 29 17:04:20.662: INFO: Created pod: pod0-0-sched-preemption-low-priority -Jul 29 17:04:20.689: INFO: Created pod: pod0-1-sched-preemption-medium-priority -Jul 29 17:04:20.726: INFO: Created pod: pod1-0-sched-preemption-medium-priority -Jul 29 17:04:20.740: INFO: Created pod: pod1-1-sched-preemption-medium-priority -Jul 29 17:04:20.813: INFO: Created pod: pod2-0-sched-preemption-medium-priority -Jul 29 17:04:20.830: INFO: Created pod: pod2-1-sched-preemption-medium-priority -STEP: Wait for pods to be scheduled. 07/29/23 17:04:20.83 -Jul 29 17:04:20.831: INFO: Waiting up to 5m0s for pod "pod0-0-sched-preemption-low-priority" in namespace "sched-preemption-1491" to be "running" -Jul 29 17:04:20.851: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 14.14912ms -Jul 29 17:04:22.890: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 2.053695173s -Jul 29 17:04:24.866: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.029385411s -Jul 29 17:04:24.866: INFO: Pod "pod0-0-sched-preemption-low-priority" satisfied condition "running" -Jul 29 17:04:24.866: INFO: Waiting up to 5m0s for pod "pod0-1-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" -Jul 29 17:04:24.875: INFO: Pod "pod0-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 8.531375ms -Jul 29 17:04:24.875: INFO: Pod "pod0-1-sched-preemption-medium-priority" satisfied condition "running" -Jul 29 17:04:24.875: INFO: Waiting up to 5m0s for pod "pod1-0-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" -Jul 29 17:04:24.883: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 8.244326ms -Jul 29 17:04:24.884: INFO: Pod "pod1-0-sched-preemption-medium-priority" satisfied condition "running" -Jul 29 17:04:24.884: INFO: Waiting up to 5m0s for pod "pod1-1-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" -Jul 29 17:04:24.890: INFO: Pod "pod1-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.775432ms -Jul 29 17:04:24.890: INFO: Pod "pod1-1-sched-preemption-medium-priority" satisfied condition "running" -Jul 29 17:04:24.890: INFO: Waiting up to 5m0s for pod "pod2-0-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" -Jul 29 17:04:24.896: INFO: Pod "pod2-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.838556ms -Jul 29 17:04:24.896: INFO: Pod "pod2-0-sched-preemption-medium-priority" satisfied condition "running" -Jul 29 17:04:24.896: INFO: Waiting up to 5m0s for pod "pod2-1-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" -Jul 29 17:04:24.902: INFO: Pod "pod2-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.944733ms -Jul 29 17:04:24.903: INFO: Pod "pod2-1-sched-preemption-medium-priority" satisfied condition "running" -STEP: Run a high priority pod that has same requirements as that of lower priority pod 07/29/23 17:04:24.903 -Jul 29 17:04:24.916: INFO: Waiting up to 2m0s for pod "preemptor-pod" in namespace "sched-preemption-1491" to be "running" -Jul 29 17:04:24.925: INFO: Pod "preemptor-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 8.28704ms -Jul 29 17:04:26.937: INFO: Pod "preemptor-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020669417s -Jul 29 17:04:28.933: INFO: Pod "preemptor-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.01626572s -Jul 29 17:04:28.933: INFO: Pod "preemptor-pod" satisfied condition "running" -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] +[It] works for multiple CRDs of different groups [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:276 +STEP: CRs in different groups (two CRDs) show up in OpenAPI documentation 08/24/23 13:13:39.441 +Aug 24 13:13:39.442: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +Aug 24 13:13:42.155: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 17:04:29.022: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:84 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] +Aug 24 13:13:51.770: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] +[DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "sched-preemption-1491" for this suite. 07/29/23 17:04:29.136 +STEP: Destroying namespace "crd-publish-openapi-282" for this suite. 08/24/23 13:13:51.808 ------------------------------ -• [SLOW TEST] [68.661 seconds] -[sig-scheduling] SchedulerPreemption [Serial] -test/e2e/scheduling/framework.go:40 - validates basic preemption works [Conformance] - test/e2e/scheduling/preemption.go:130 +• [SLOW TEST] [12.454 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + works for multiple CRDs of different groups [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:276 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:03:20.488 - Jul 29 17:03:20.488: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename sched-preemption 07/29/23 17:03:20.49 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:03:20.521 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:03:20.525 - [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + STEP: Creating a kubernetes client 08/24/23 13:13:39.381 + Aug 24 13:13:39.381: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename crd-publish-openapi 08/24/23 13:13:39.383 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:13:39.428 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:13:39.433 + [BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:97 - Jul 29 
17:03:20.555: INFO: Waiting up to 1m0s for all nodes to be ready - Jul 29 17:04:20.615: INFO: Waiting for terminating namespaces to be deleted... - [It] validates basic preemption works [Conformance] - test/e2e/scheduling/preemption.go:130 - STEP: Create pods that use 4/5 of node resources. 07/29/23 17:04:20.621 - Jul 29 17:04:20.662: INFO: Created pod: pod0-0-sched-preemption-low-priority - Jul 29 17:04:20.689: INFO: Created pod: pod0-1-sched-preemption-medium-priority - Jul 29 17:04:20.726: INFO: Created pod: pod1-0-sched-preemption-medium-priority - Jul 29 17:04:20.740: INFO: Created pod: pod1-1-sched-preemption-medium-priority - Jul 29 17:04:20.813: INFO: Created pod: pod2-0-sched-preemption-medium-priority - Jul 29 17:04:20.830: INFO: Created pod: pod2-1-sched-preemption-medium-priority - STEP: Wait for pods to be scheduled. 07/29/23 17:04:20.83 - Jul 29 17:04:20.831: INFO: Waiting up to 5m0s for pod "pod0-0-sched-preemption-low-priority" in namespace "sched-preemption-1491" to be "running" - Jul 29 17:04:20.851: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 14.14912ms - Jul 29 17:04:22.890: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Pending", Reason="", readiness=false. Elapsed: 2.053695173s - Jul 29 17:04:24.866: INFO: Pod "pod0-0-sched-preemption-low-priority": Phase="Running", Reason="", readiness=true. Elapsed: 4.029385411s - Jul 29 17:04:24.866: INFO: Pod "pod0-0-sched-preemption-low-priority" satisfied condition "running" - Jul 29 17:04:24.866: INFO: Waiting up to 5m0s for pod "pod0-1-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" - Jul 29 17:04:24.875: INFO: Pod "pod0-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 8.531375ms - Jul 29 17:04:24.875: INFO: Pod "pod0-1-sched-preemption-medium-priority" satisfied condition "running" - Jul 29 17:04:24.875: INFO: Waiting up to 5m0s for pod "pod1-0-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" - Jul 29 17:04:24.883: INFO: Pod "pod1-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 8.244326ms - Jul 29 17:04:24.884: INFO: Pod "pod1-0-sched-preemption-medium-priority" satisfied condition "running" - Jul 29 17:04:24.884: INFO: Waiting up to 5m0s for pod "pod1-1-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" - Jul 29 17:04:24.890: INFO: Pod "pod1-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.775432ms - Jul 29 17:04:24.890: INFO: Pod "pod1-1-sched-preemption-medium-priority" satisfied condition "running" - Jul 29 17:04:24.890: INFO: Waiting up to 5m0s for pod "pod2-0-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" - Jul 29 17:04:24.896: INFO: Pod "pod2-0-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. Elapsed: 5.838556ms - Jul 29 17:04:24.896: INFO: Pod "pod2-0-sched-preemption-medium-priority" satisfied condition "running" - Jul 29 17:04:24.896: INFO: Waiting up to 5m0s for pod "pod2-1-sched-preemption-medium-priority" in namespace "sched-preemption-1491" to be "running" - Jul 29 17:04:24.902: INFO: Pod "pod2-1-sched-preemption-medium-priority": Phase="Running", Reason="", readiness=true. 
Elapsed: 5.944733ms - Jul 29 17:04:24.903: INFO: Pod "pod2-1-sched-preemption-medium-priority" satisfied condition "running" - STEP: Run a high priority pod that has same requirements as that of lower priority pod 07/29/23 17:04:24.903 - Jul 29 17:04:24.916: INFO: Waiting up to 2m0s for pod "preemptor-pod" in namespace "sched-preemption-1491" to be "running" - Jul 29 17:04:24.925: INFO: Pod "preemptor-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 8.28704ms - Jul 29 17:04:26.937: INFO: Pod "preemptor-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020669417s - Jul 29 17:04:28.933: INFO: Pod "preemptor-pod": Phase="Running", Reason="", readiness=true. Elapsed: 4.01626572s - Jul 29 17:04:28.933: INFO: Pod "preemptor-pod" satisfied condition "running" - [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + [It] works for multiple CRDs of different groups [Conformance] + test/e2e/apimachinery/crd_publish_openapi.go:276 + STEP: CRs in different groups (two CRDs) show up in OpenAPI documentation 08/24/23 13:13:39.441 + Aug 24 13:13:39.442: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + Aug 24 13:13:42.155: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + [AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 17:04:29.022: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-scheduling] SchedulerPreemption [Serial] - test/e2e/scheduling/preemption.go:84 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + Aug 24 13:13:51.770: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-scheduling] SchedulerPreemption [Serial] + [DeferCleanup (Each)] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "sched-preemption-1491" for this suite. 07/29/23 17:04:29.136 + STEP: Destroying namespace "crd-publish-openapi-282" for this suite. 
08/24/23 13:13:51.808 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSS +SS ------------------------------ -[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] - Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] - test/e2e/apps/statefulset.go:697 -[BeforeEach] [sig-apps] StatefulSet +[sig-node] Container Runtime blackbox test on terminated container + should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:216 +[BeforeEach] [sig-node] Container Runtime set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:04:29.155 -Jul 29 17:04:29.155: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename statefulset 07/29/23 17:04:29.16 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:04:29.194 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:04:29.199 -[BeforeEach] [sig-apps] StatefulSet +STEP: Creating a kubernetes client 08/24/23 13:13:51.838 +Aug 24 13:13:51.838: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-runtime 08/24/23 13:13:51.84 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:13:51.864 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:13:51.869 +[BeforeEach] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 -[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 -STEP: Creating service test in namespace statefulset-3316 07/29/23 17:04:29.203 -[It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] - test/e2e/apps/statefulset.go:697 -STEP: Creating stateful set ss in namespace statefulset-3316 07/29/23 17:04:29.213 -STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-3316 07/29/23 17:04:29.226 -Jul 29 17:04:29.233: INFO: Found 0 stateful pods, waiting for 1 -Jul 29 17:04:39.246: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true -STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod 07/29/23 17:04:39.246 -Jul 29 17:04:39.253: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 17:04:39.535: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 17:04:39.535: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 17:04:39.535: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Jul 29 17:04:39.541: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true -Jul 29 17:04:49.551: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false -Jul 29 17:04:49.551: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 17:04:49.583: INFO: POD NODE PHASE GRACE CONDITIONS -Jul 29 17:04:49.583: INFO: ss-0 wetuj3nuajog-3 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:29 
+0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:39 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:39 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:29 +0000 UTC }] -Jul 29 17:04:49.584: INFO: -Jul 29 17:04:49.584: INFO: StatefulSet ss has not reached scale 3, at 1 -Jul 29 17:04:50.590: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.992673852s -Jul 29 17:04:51.602: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.986547784s -Jul 29 17:04:52.612: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.974004882s -Jul 29 17:04:53.625: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.964257442s -Jul 29 17:04:54.634: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.951376736s -Jul 29 17:04:55.642: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.942158842s -Jul 29 17:04:56.653: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.934698013s -Jul 29 17:04:57.662: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.923327397s -Jul 29 17:04:58.673: INFO: Verifying statefulset ss doesn't scale past 3 for another 914.081424ms -STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-3316 07/29/23 17:04:59.675 -Jul 29 17:04:59.703: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Jul 29 17:04:59.952: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" -Jul 29 17:04:59.952: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Jul 29 17:04:59.952: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Jul 29 17:04:59.952: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Jul 29 17:05:00.194: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" -Jul 29 17:05:00.194: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Jul 29 17:05:00.194: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Jul 29 17:05:00.195: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' -Jul 29 17:05:00.461: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" -Jul 29 17:05:00.461: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" -Jul 29 17:05:00.461: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - -Jul 29 17:05:00.474: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false -Jul 29 17:05:10.484: INFO: Waiting for pod ss-0 to enter Running - 
Ready=true, currently Running - Ready=true -Jul 29 17:05:10.484: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true -Jul 29 17:05:10.484: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true -STEP: Scale down will not halt with unhealthy stateful pod 07/29/23 17:05:10.484 -Jul 29 17:05:10.491: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 17:05:10.713: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 17:05:10.713: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 17:05:10.713: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Jul 29 17:05:10.713: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 17:05:10.959: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 17:05:10.959: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 17:05:10.959: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Jul 29 17:05:10.959: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' -Jul 29 17:05:11.214: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" -Jul 29 17:05:11.214: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" -Jul 29 17:05:11.214: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - -Jul 29 17:05:11.214: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 17:05:11.222: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 3 -Jul 29 17:05:21.238: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false -Jul 29 17:05:21.239: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false -Jul 29 17:05:21.239: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false -Jul 29 17:05:21.266: INFO: POD NODE PHASE GRACE CONDITIONS -Jul 29 17:05:21.267: INFO: ss-0 wetuj3nuajog-3 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:29 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:29 +0000 UTC }] -Jul 29 17:05:21.267: INFO: ss-1 wetuj3nuajog-2 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: 
[webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC }] -Jul 29 17:05:21.267: INFO: ss-2 wetuj3nuajog-1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC }] -Jul 29 17:05:21.267: INFO: -Jul 29 17:05:21.267: INFO: StatefulSet ss has not reached scale 0, at 3 -Jul 29 17:05:22.275: INFO: POD NODE PHASE GRACE CONDITIONS -Jul 29 17:05:22.275: INFO: ss-1 wetuj3nuajog-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC }] -Jul 29 17:05:22.275: INFO: -Jul 29 17:05:22.275: INFO: StatefulSet ss has not reached scale 0, at 1 -Jul 29 17:05:23.284: INFO: Verifying statefulset ss doesn't scale past 0 for another 7.983350699s -Jul 29 17:05:24.296: INFO: Verifying statefulset ss doesn't scale past 0 for another 6.974965405s -Jul 29 17:05:25.305: INFO: Verifying statefulset ss doesn't scale past 0 for another 5.962915743s -Jul 29 17:05:26.311: INFO: Verifying statefulset ss doesn't scale past 0 for another 4.954151603s -Jul 29 17:05:27.321: INFO: Verifying statefulset ss doesn't scale past 0 for another 3.948211735s -Jul 29 17:05:28.327: INFO: Verifying statefulset ss doesn't scale past 0 for another 2.938107763s -Jul 29 17:05:29.338: INFO: Verifying statefulset ss doesn't scale past 0 for another 1.931567351s -Jul 29 17:05:30.345: INFO: Verifying statefulset ss doesn't scale past 0 for another 921.14601ms -STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-3316 07/29/23 17:05:31.345 -Jul 29 17:05:31.356: INFO: Scaling statefulset ss to 0 -Jul 29 17:05:31.376: INFO: Waiting for statefulset status.replicas updated to 0 -[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 -Jul 29 17:05:31.382: INFO: Deleting all statefulset in ns statefulset-3316 -Jul 29 17:05:31.387: INFO: Scaling statefulset ss to 0 -Jul 29 17:05:31.404: INFO: Waiting for statefulset status.replicas updated to 0 -Jul 29 17:05:31.409: INFO: Deleting statefulset ss -[AfterEach] [sig-apps] StatefulSet +[It] should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:216 +STEP: create the container 08/24/23 13:13:51.873 +STEP: wait for the container to reach Failed 08/24/23 13:13:51.926 +STEP: get the container status 08/24/23 13:13:55.966 +STEP: the container should be terminated 08/24/23 13:13:55.972 +STEP: the termination message should be set 08/24/23 13:13:55.972 +Aug 24 13:13:55.973: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- +STEP: delete the container 08/24/23 13:13:55.973 +[AfterEach] [sig-node] Container Runtime test/e2e/framework/node/init/init.go:32 -Jul 
29 17:05:31.432: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] StatefulSet +Aug 24 13:13:55.992: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-node] Container Runtime dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] StatefulSet +[DeferCleanup (Each)] [sig-node] Container Runtime tear down framework | framework.go:193 -STEP: Destroying namespace "statefulset-3316" for this suite. 07/29/23 17:05:31.442 +STEP: Destroying namespace "container-runtime-6740" for this suite. 08/24/23 13:13:55.998 ------------------------------ -• [SLOW TEST] [62.299 seconds] -[sig-apps] StatefulSet -test/e2e/apps/framework.go:23 - Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:103 - Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] - test/e2e/apps/statefulset.go:697 +• [4.170 seconds] +[sig-node] Container Runtime +test/e2e/common/node/framework.go:23 + blackbox test + test/e2e/common/node/runtime.go:44 + on terminated container + test/e2e/common/node/runtime.go:137 + should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:216 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] StatefulSet + [BeforeEach] [sig-node] Container Runtime set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:04:29.155 - Jul 29 17:04:29.155: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename statefulset 07/29/23 17:04:29.16 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:04:29.194 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:04:29.199 - [BeforeEach] [sig-apps] StatefulSet + STEP: Creating a kubernetes client 08/24/23 13:13:51.838 + Aug 24 13:13:51.838: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-runtime 08/24/23 13:13:51.84 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:13:51.864 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:13:51.869 + [BeforeEach] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] StatefulSet - test/e2e/apps/statefulset.go:98 - [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:113 - STEP: Creating service test in namespace statefulset-3316 07/29/23 17:04:29.203 - [It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] - test/e2e/apps/statefulset.go:697 - STEP: Creating stateful set ss in namespace statefulset-3316 07/29/23 17:04:29.213 - STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-3316 07/29/23 17:04:29.226 - Jul 29 17:04:29.233: INFO: Found 0 stateful pods, waiting for 1 - Jul 29 17:04:39.246: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true - STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod 07/29/23 17:04:39.246 - Jul 29 17:04:39.253: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 
exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 17:04:39.535: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 17:04:39.535: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 17:04:39.535: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - Jul 29 17:04:39.541: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true - Jul 29 17:04:49.551: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false - Jul 29 17:04:49.551: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 17:04:49.583: INFO: POD NODE PHASE GRACE CONDITIONS - Jul 29 17:04:49.583: INFO: ss-0 wetuj3nuajog-3 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:29 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:39 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:39 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:29 +0000 UTC }] - Jul 29 17:04:49.584: INFO: - Jul 29 17:04:49.584: INFO: StatefulSet ss has not reached scale 3, at 1 - Jul 29 17:04:50.590: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.992673852s - Jul 29 17:04:51.602: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.986547784s - Jul 29 17:04:52.612: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.974004882s - Jul 29 17:04:53.625: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.964257442s - Jul 29 17:04:54.634: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.951376736s - Jul 29 17:04:55.642: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.942158842s - Jul 29 17:04:56.653: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.934698013s - Jul 29 17:04:57.662: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.923327397s - Jul 29 17:04:58.673: INFO: Verifying statefulset ss doesn't scale past 3 for another 914.081424ms - STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-3316 07/29/23 17:04:59.675 - Jul 29 17:04:59.703: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' - Jul 29 17:04:59.952: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" - Jul 29 17:04:59.952: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" - Jul 29 17:04:59.952: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - - Jul 29 17:04:59.952: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' - Jul 29 17:05:00.194: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" - Jul 29 17:05:00.194: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" - Jul 29 17:05:00.194: INFO: 
stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - - Jul 29 17:05:00.195: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' - Jul 29 17:05:00.461: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" - Jul 29 17:05:00.461: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" - Jul 29 17:05:00.461: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' - - Jul 29 17:05:00.474: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false - Jul 29 17:05:10.484: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 17:05:10.484: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true - Jul 29 17:05:10.484: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true - STEP: Scale down will not halt with unhealthy stateful pod 07/29/23 17:05:10.484 - Jul 29 17:05:10.491: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 17:05:10.713: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 17:05:10.713: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 17:05:10.713: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - Jul 29 17:05:10.713: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 17:05:10.959: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 17:05:10.959: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 17:05:10.959: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - Jul 29 17:05:10.959: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=statefulset-3316 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' - Jul 29 17:05:11.214: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" - Jul 29 17:05:11.214: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" - Jul 29 17:05:11.214: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' - - Jul 29 17:05:11.214: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 17:05:11.222: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 3 - Jul 29 17:05:21.238: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false - Jul 29 17:05:21.239: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false - Jul 29 17:05:21.239: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - 
Ready=false - Jul 29 17:05:21.266: INFO: POD NODE PHASE GRACE CONDITIONS - Jul 29 17:05:21.267: INFO: ss-0 wetuj3nuajog-3 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:29 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:29 +0000 UTC }] - Jul 29 17:05:21.267: INFO: ss-1 wetuj3nuajog-2 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC }] - Jul 29 17:05:21.267: INFO: ss-2 wetuj3nuajog-1 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC }] - Jul 29 17:05:21.267: INFO: - Jul 29 17:05:21.267: INFO: StatefulSet ss has not reached scale 0, at 3 - Jul 29 17:05:22.275: INFO: POD NODE PHASE GRACE CONDITIONS - Jul 29 17:05:22.275: INFO: ss-1 wetuj3nuajog-2 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:05:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2023-07-29 17:04:49 +0000 UTC }] - Jul 29 17:05:22.275: INFO: - Jul 29 17:05:22.275: INFO: StatefulSet ss has not reached scale 0, at 1 - Jul 29 17:05:23.284: INFO: Verifying statefulset ss doesn't scale past 0 for another 7.983350699s - Jul 29 17:05:24.296: INFO: Verifying statefulset ss doesn't scale past 0 for another 6.974965405s - Jul 29 17:05:25.305: INFO: Verifying statefulset ss doesn't scale past 0 for another 5.962915743s - Jul 29 17:05:26.311: INFO: Verifying statefulset ss doesn't scale past 0 for another 4.954151603s - Jul 29 17:05:27.321: INFO: Verifying statefulset ss doesn't scale past 0 for another 3.948211735s - Jul 29 17:05:28.327: INFO: Verifying statefulset ss doesn't scale past 0 for another 2.938107763s - Jul 29 17:05:29.338: INFO: Verifying statefulset ss doesn't scale past 0 for another 1.931567351s - Jul 29 17:05:30.345: INFO: Verifying statefulset ss doesn't scale past 0 for another 921.14601ms - STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-3316 07/29/23 17:05:31.345 - Jul 29 17:05:31.356: INFO: Scaling statefulset ss to 0 - Jul 29 17:05:31.376: INFO: Waiting for statefulset status.replicas updated to 0 - [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] - test/e2e/apps/statefulset.go:124 - Jul 29 
17:05:31.382: INFO: Deleting all statefulset in ns statefulset-3316 - Jul 29 17:05:31.387: INFO: Scaling statefulset ss to 0 - Jul 29 17:05:31.404: INFO: Waiting for statefulset status.replicas updated to 0 - Jul 29 17:05:31.409: INFO: Deleting statefulset ss - [AfterEach] [sig-apps] StatefulSet + [It] should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + test/e2e/common/node/runtime.go:216 + STEP: create the container 08/24/23 13:13:51.873 + STEP: wait for the container to reach Failed 08/24/23 13:13:51.926 + STEP: get the container status 08/24/23 13:13:55.966 + STEP: the container should be terminated 08/24/23 13:13:55.972 + STEP: the termination message should be set 08/24/23 13:13:55.972 + Aug 24 13:13:55.973: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- + STEP: delete the container 08/24/23 13:13:55.973 + [AfterEach] [sig-node] Container Runtime test/e2e/framework/node/init/init.go:32 - Jul 29 17:05:31.432: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] StatefulSet + Aug 24 13:13:55.992: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Container Runtime test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-node] Container Runtime dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] StatefulSet + [DeferCleanup (Each)] [sig-node] Container Runtime tear down framework | framework.go:193 - STEP: Destroying namespace "statefulset-3316" for this suite. 07/29/23 17:05:31.442 + STEP: Destroying namespace "container-runtime-6740" for this suite. 08/24/23 13:13:55.998 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] server version - should find the server version [Conformance] - test/e2e/apimachinery/server_version.go:39 -[BeforeEach] [sig-api-machinery] server version +[sig-node] InitContainer [NodeConformance] + should invoke init containers on a RestartNever pod [Conformance] + test/e2e/common/node/init_container.go:177 +[BeforeEach] [sig-node] InitContainer [NodeConformance] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:05:31.463 -Jul 29 17:05:31.464: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename server-version 07/29/23 17:05:31.466 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:31.501 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:31.507 -[BeforeEach] [sig-api-machinery] server version +STEP: Creating a kubernetes client 08/24/23 13:13:56.012 +Aug 24 13:13:56.012: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename init-container 08/24/23 13:13:56.013 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:13:56.04 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:13:56.048 +[BeforeEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:31 -[It] should find the server version [Conformance] - test/e2e/apimachinery/server_version.go:39 -STEP: Request ServerVersion 07/29/23 17:05:31.513 -STEP: Confirm major version 07/29/23 17:05:31.514 -Jul 29 17:05:31.515: INFO: Major version: 1 -STEP: Confirm minor version 
07/29/23 17:05:31.515 -Jul 29 17:05:31.515: INFO: cleanMinorVersion: 26 -Jul 29 17:05:31.515: INFO: Minor version: 26 -[AfterEach] [sig-api-machinery] server version +[BeforeEach] [sig-node] InitContainer [NodeConformance] + test/e2e/common/node/init_container.go:165 +[It] should invoke init containers on a RestartNever pod [Conformance] + test/e2e/common/node/init_container.go:177 +STEP: creating the pod 08/24/23 13:13:56.052 +Aug 24 13:13:56.052: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/node/init/init.go:32 -Jul 29 17:05:31.515: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] server version +Aug 24 13:14:01.257: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] server version +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] server version +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] tear down framework | framework.go:193 -STEP: Destroying namespace "server-version-274" for this suite. 07/29/23 17:05:31.523 +STEP: Destroying namespace "init-container-8063" for this suite. 08/24/23 13:14:01.271 ------------------------------ -• [0.075 seconds] -[sig-api-machinery] server version -test/e2e/apimachinery/framework.go:23 - should find the server version [Conformance] - test/e2e/apimachinery/server_version.go:39 +• [SLOW TEST] [5.269 seconds] +[sig-node] InitContainer [NodeConformance] +test/e2e/common/node/framework.go:23 + should invoke init containers on a RestartNever pod [Conformance] + test/e2e/common/node/init_container.go:177 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] server version + [BeforeEach] [sig-node] InitContainer [NodeConformance] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:05:31.463 - Jul 29 17:05:31.464: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename server-version 07/29/23 17:05:31.466 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:31.501 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:31.507 - [BeforeEach] [sig-api-machinery] server version + STEP: Creating a kubernetes client 08/24/23 13:13:56.012 + Aug 24 13:13:56.012: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename init-container 08/24/23 13:13:56.013 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:13:56.04 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:13:56.048 + [BeforeEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:31 - [It] should find the server version [Conformance] - test/e2e/apimachinery/server_version.go:39 - STEP: Request ServerVersion 07/29/23 17:05:31.513 - STEP: Confirm major version 07/29/23 17:05:31.514 - Jul 29 17:05:31.515: INFO: Major version: 1 - STEP: Confirm minor version 07/29/23 17:05:31.515 - Jul 29 17:05:31.515: INFO: cleanMinorVersion: 26 - Jul 29 17:05:31.515: INFO: Minor version: 26 - [AfterEach] [sig-api-machinery] server version + [BeforeEach] [sig-node] InitContainer [NodeConformance] + 
test/e2e/common/node/init_container.go:165 + [It] should invoke init containers on a RestartNever pod [Conformance] + test/e2e/common/node/init_container.go:177 + STEP: creating the pod 08/24/23 13:13:56.052 + Aug 24 13:13:56.052: INFO: PodSpec: initContainers in spec.initContainers + [AfterEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/node/init/init.go:32 - Jul 29 17:05:31.515: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] server version + Aug 24 13:14:01.257: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] server version + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] server version + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] tear down framework | framework.go:193 - STEP: Destroying namespace "server-version-274" for this suite. 07/29/23 17:05:31.523 + STEP: Destroying namespace "init-container-8063" for this suite. 08/24/23 13:14:01.271 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSS +SSSSSSSSSSSS ------------------------------ -[sig-storage] Projected combined - should project all components that make up the projection API [Projection][NodeConformance] [Conformance] - test/e2e/common/storage/projected_combined.go:44 -[BeforeEach] [sig-storage] Projected combined +[sig-node] PodTemplates + should replace a pod template [Conformance] + test/e2e/common/node/podtemplates.go:176 +[BeforeEach] [sig-node] PodTemplates set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:05:31.539 -Jul 29 17:05:31.539: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 17:05:31.542 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:31.574 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:31.58 -[BeforeEach] [sig-storage] Projected combined +STEP: Creating a kubernetes client 08/24/23 13:14:01.282 +Aug 24 13:14:01.283: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename podtemplate 08/24/23 13:14:01.285 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:14:01.31 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:14:01.315 +[BeforeEach] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:31 -[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance] - test/e2e/common/storage/projected_combined.go:44 -STEP: Creating configMap with name configmap-projected-all-test-volume-1b21394e-dabc-47c9-8a26-a93e9917b8ce 07/29/23 17:05:31.584 -STEP: Creating secret with name secret-projected-all-test-volume-c9b71c96-660c-4d38-b449-9e48aabc2d55 07/29/23 17:05:31.593 -STEP: Creating a pod to test Check all projections for projected volume plugin 07/29/23 17:05:31.601 -Jul 29 17:05:31.620: INFO: Waiting up to 5m0s for pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48" in namespace "projected-4873" to be "Succeeded or Failed" -Jul 29 17:05:31.626: INFO: Pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.220582ms -Jul 29 17:05:33.636: INFO: Pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015862622s -Jul 29 17:05:35.633: INFO: Pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012864179s -STEP: Saw pod success 07/29/23 17:05:35.633 -Jul 29 17:05:35.634: INFO: Pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48" satisfied condition "Succeeded or Failed" -Jul 29 17:05:35.640: INFO: Trying to get logs from node wetuj3nuajog-3 pod projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48 container projected-all-volume-test: -STEP: delete the pod 07/29/23 17:05:35.665 -Jul 29 17:05:35.689: INFO: Waiting for pod projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48 to disappear -Jul 29 17:05:35.696: INFO: Pod projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48 no longer exists -[AfterEach] [sig-storage] Projected combined +[It] should replace a pod template [Conformance] + test/e2e/common/node/podtemplates.go:176 +STEP: Create a pod template 08/24/23 13:14:01.319 +STEP: Replace a pod template 08/24/23 13:14:01.327 +Aug 24 13:14:01.351: INFO: Found updated podtemplate annotation: "true" + +[AfterEach] [sig-node] PodTemplates test/e2e/framework/node/init/init.go:32 -Jul 29 17:05:35.696: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected combined +Aug 24 13:14:01.351: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected combined +[DeferCleanup (Each)] [sig-node] PodTemplates dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected combined +[DeferCleanup (Each)] [sig-node] PodTemplates tear down framework | framework.go:193 -STEP: Destroying namespace "projected-4873" for this suite. 07/29/23 17:05:35.704 +STEP: Destroying namespace "podtemplate-9998" for this suite. 
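The replace step is an ordinary read-modify-update (HTTP PUT) on a v1 PodTemplate. A hand-rolled sketch follows; the annotation key and all names are assumptions for illustration, since the log only shows that the updated annotation's value is "true":

    apiVersion: v1
    kind: PodTemplate
    metadata:
      name: demo-pod-template          # illustrative name
      annotations:
        updated: "true"                # assumed key; the verification above checks the value
    template:
      metadata:
        labels:
          app: demo
      spec:
        containers:
        - name: main
          image: busybox:1.36          # assumed image

Applying this with kubectl replace -f podtemplate.yaml issues the same PUT the framework uses, and reading the object back shows the new annotation, mirroring the "Found updated podtemplate annotation" line.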
08/24/23 13:14:01.363 ------------------------------ -• [4.176 seconds] -[sig-storage] Projected combined -test/e2e/common/storage/framework.go:23 - should project all components that make up the projection API [Projection][NodeConformance] [Conformance] - test/e2e/common/storage/projected_combined.go:44 +• [0.094 seconds] +[sig-node] PodTemplates +test/e2e/common/node/framework.go:23 + should replace a pod template [Conformance] + test/e2e/common/node/podtemplates.go:176 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected combined + [BeforeEach] [sig-node] PodTemplates set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:05:31.539 - Jul 29 17:05:31.539: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 17:05:31.542 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:31.574 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:31.58 - [BeforeEach] [sig-storage] Projected combined + STEP: Creating a kubernetes client 08/24/23 13:14:01.282 + Aug 24 13:14:01.283: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename podtemplate 08/24/23 13:14:01.285 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:14:01.31 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:14:01.315 + [BeforeEach] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:31 - [It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance] - test/e2e/common/storage/projected_combined.go:44 - STEP: Creating configMap with name configmap-projected-all-test-volume-1b21394e-dabc-47c9-8a26-a93e9917b8ce 07/29/23 17:05:31.584 - STEP: Creating secret with name secret-projected-all-test-volume-c9b71c96-660c-4d38-b449-9e48aabc2d55 07/29/23 17:05:31.593 - STEP: Creating a pod to test Check all projections for projected volume plugin 07/29/23 17:05:31.601 - Jul 29 17:05:31.620: INFO: Waiting up to 5m0s for pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48" in namespace "projected-4873" to be "Succeeded or Failed" - Jul 29 17:05:31.626: INFO: Pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48": Phase="Pending", Reason="", readiness=false. Elapsed: 5.220582ms - Jul 29 17:05:33.636: INFO: Pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015862622s - Jul 29 17:05:35.633: INFO: Pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.012864179s - STEP: Saw pod success 07/29/23 17:05:35.633 - Jul 29 17:05:35.634: INFO: Pod "projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48" satisfied condition "Succeeded or Failed" - Jul 29 17:05:35.640: INFO: Trying to get logs from node wetuj3nuajog-3 pod projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48 container projected-all-volume-test: - STEP: delete the pod 07/29/23 17:05:35.665 - Jul 29 17:05:35.689: INFO: Waiting for pod projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48 to disappear - Jul 29 17:05:35.696: INFO: Pod projected-volume-43ace82d-933a-4e3a-83dd-bc13613e3b48 no longer exists - [AfterEach] [sig-storage] Projected combined + [It] should replace a pod template [Conformance] + test/e2e/common/node/podtemplates.go:176 + STEP: Create a pod template 08/24/23 13:14:01.319 + STEP: Replace a pod template 08/24/23 13:14:01.327 + Aug 24 13:14:01.351: INFO: Found updated podtemplate annotation: "true" + + [AfterEach] [sig-node] PodTemplates test/e2e/framework/node/init/init.go:32 - Jul 29 17:05:35.696: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected combined + Aug 24 13:14:01.351: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] PodTemplates test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected combined + [DeferCleanup (Each)] [sig-node] PodTemplates dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected combined + [DeferCleanup (Each)] [sig-node] PodTemplates tear down framework | framework.go:193 - STEP: Destroying namespace "projected-4873" for this suite. 07/29/23 17:05:35.704 + STEP: Destroying namespace "podtemplate-9998" for this suite. 08/24/23 13:14:01.363 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Services - should be able to change the type from NodePort to ExternalName [Conformance] - test/e2e/network/service.go:1557 -[BeforeEach] [sig-network] Services +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:109 +[BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:05:35.723 -Jul 29 17:05:35.723: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 17:05:35.725 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:35.758 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:35.764 -[BeforeEach] [sig-network] Services +STEP: Creating a kubernetes client 08/24/23 13:14:01.381 +Aug 24 13:14:01.381: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 13:14:01.383 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:14:01.459 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:14:01.467 +[BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 -[It] should be able to change the type from NodePort to ExternalName [Conformance] - test/e2e/network/service.go:1557 -STEP: creating a service 
nodeport-service with the type=NodePort in namespace services-3193 07/29/23 17:05:35.77 -STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service 07/29/23 17:05:35.815 -STEP: creating service externalsvc in namespace services-3193 07/29/23 17:05:35.815 -STEP: creating replication controller externalsvc in namespace services-3193 07/29/23 17:05:35.851 -I0729 17:05:35.871729 13 runners.go:193] Created replication controller with name: externalsvc, namespace: services-3193, replica count: 2 -I0729 17:05:38.923307 13 runners.go:193] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -STEP: changing the NodePort service to type=ExternalName 07/29/23 17:05:38.929 -Jul 29 17:05:38.982: INFO: Creating new exec pod -Jul 29 17:05:39.007: INFO: Waiting up to 5m0s for pod "execpodn292k" in namespace "services-3193" to be "running" -Jul 29 17:05:39.022: INFO: Pod "execpodn292k": Phase="Pending", Reason="", readiness=false. Elapsed: 14.327731ms -Jul 29 17:05:41.028: INFO: Pod "execpodn292k": Phase="Running", Reason="", readiness=true. Elapsed: 2.020243087s -Jul 29 17:05:41.028: INFO: Pod "execpodn292k" satisfied condition "running" -Jul 29 17:05:41.028: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3193 exec execpodn292k -- /bin/sh -x -c nslookup nodeport-service.services-3193.svc.cluster.local' -Jul 29 17:05:41.373: INFO: stderr: "+ nslookup nodeport-service.services-3193.svc.cluster.local\n" -Jul 29 17:05:41.373: INFO: stdout: "Server:\t\t10.233.0.10\nAddress:\t10.233.0.10#53\n\nnodeport-service.services-3193.svc.cluster.local\tcanonical name = externalsvc.services-3193.svc.cluster.local.\nName:\texternalsvc.services-3193.svc.cluster.local\nAddress: 10.233.25.238\n\n" -STEP: deleting ReplicationController externalsvc in namespace services-3193, will wait for the garbage collector to delete the pods 07/29/23 17:05:41.373 -Jul 29 17:05:41.447: INFO: Deleting ReplicationController externalsvc took: 14.376028ms -Jul 29 17:05:41.548: INFO: Terminating ReplicationController externalsvc pods took: 100.162513ms -Jul 29 17:05:43.891: INFO: Cleaning up the NodePort to ExternalName test service -[AfterEach] [sig-network] Services +[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:109 +STEP: Creating configMap with name configmap-test-volume-map-20aa8bf0-1eb2-43d3-8958-717c63950716 08/24/23 13:14:01.471 +STEP: Creating a pod to test consume configMaps 08/24/23 13:14:01.48 +Aug 24 13:14:01.505: INFO: Waiting up to 5m0s for pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e" in namespace "configmap-2878" to be "Succeeded or Failed" +Aug 24 13:14:01.511: INFO: Pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e": Phase="Pending", Reason="", readiness=false. Elapsed: 5.818679ms +Aug 24 13:14:03.519: INFO: Pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01444033s +Aug 24 13:14:05.518: INFO: Pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.012996637s +STEP: Saw pod success 08/24/23 13:14:05.518 +Aug 24 13:14:05.518: INFO: Pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e" satisfied condition "Succeeded or Failed" +Aug 24 13:14:05.528: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e container agnhost-container: +STEP: delete the pod 08/24/23 13:14:05.559 +Aug 24 13:14:05.585: INFO: Waiting for pod pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e to disappear +Aug 24 13:14:05.593: INFO: Pod pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e no longer exists +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 17:05:43.963: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-network] Services +Aug 24 13:14:05.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-network] Services +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "services-3193" for this suite. 07/29/23 17:05:43.975 +STEP: Destroying namespace "configmap-2878" for this suite. 08/24/23 13:14:05.6 ------------------------------ -• [SLOW TEST] [8.266 seconds] -[sig-network] Services -test/e2e/network/common/framework.go:23 - should be able to change the type from NodePort to ExternalName [Conformance] - test/e2e/network/service.go:1557 +• [4.231 seconds] +[sig-storage] ConfigMap +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:109 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-network] Services + [BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:05:35.723 - Jul 29 17:05:35.723: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 17:05:35.725 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:35.758 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:35.764 - [BeforeEach] [sig-network] Services + STEP: Creating a kubernetes client 08/24/23 13:14:01.381 + Aug 24 13:14:01.381: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 13:14:01.383 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:14:01.459 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:14:01.467 + [BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-network] Services - test/e2e/network/service.go:766 - [It] should be able to change the type from NodePort to ExternalName [Conformance] - test/e2e/network/service.go:1557 - STEP: creating a service nodeport-service with the type=NodePort in namespace services-3193 07/29/23 17:05:35.77 - STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service 07/29/23 17:05:35.815 - STEP: creating service externalsvc in namespace services-3193 07/29/23 17:05:35.815 - STEP: creating replication controller externalsvc in namespace 
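What this ConfigMap spec validates is key-to-path mapping through volumes[].configMap.items while the pod runs as a non-root UID; a minimal sketch with assumed names, image, and UID:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: demo-config                # illustrative name
    data:
      data-1: value-1
    ---
    apiVersion: v1
    kind: Pod
    metadata:
      name: configmap-mapping-demo
    spec:
      restartPolicy: Never
      securityContext:
        runAsUser: 1000                # non-root, per the [NodeConformance] variant
      containers:
      - name: main
        image: busybox:1.36
        command: ["cat", "/etc/config/path/to/data-1"]
        volumeMounts:
        - name: cfg
          mountPath: /etc/config
      volumes:
      - name: cfg
        configMap:
          name: demo-config
          items:                       # the "mappings": remap key data-1 to a custom path
          - key: data-1
            path: path/to/data-1

The pod only succeeds if the remapped file is readable at the mapped path by the non-root user, which is the "Succeeded or Failed" condition the run above waits on.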
services-3193 07/29/23 17:05:35.851 - I0729 17:05:35.871729 13 runners.go:193] Created replication controller with name: externalsvc, namespace: services-3193, replica count: 2 - I0729 17:05:38.923307 13 runners.go:193] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady - STEP: changing the NodePort service to type=ExternalName 07/29/23 17:05:38.929 - Jul 29 17:05:38.982: INFO: Creating new exec pod - Jul 29 17:05:39.007: INFO: Waiting up to 5m0s for pod "execpodn292k" in namespace "services-3193" to be "running" - Jul 29 17:05:39.022: INFO: Pod "execpodn292k": Phase="Pending", Reason="", readiness=false. Elapsed: 14.327731ms - Jul 29 17:05:41.028: INFO: Pod "execpodn292k": Phase="Running", Reason="", readiness=true. Elapsed: 2.020243087s - Jul 29 17:05:41.028: INFO: Pod "execpodn292k" satisfied condition "running" - Jul 29 17:05:41.028: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-3193 exec execpodn292k -- /bin/sh -x -c nslookup nodeport-service.services-3193.svc.cluster.local' - Jul 29 17:05:41.373: INFO: stderr: "+ nslookup nodeport-service.services-3193.svc.cluster.local\n" - Jul 29 17:05:41.373: INFO: stdout: "Server:\t\t10.233.0.10\nAddress:\t10.233.0.10#53\n\nnodeport-service.services-3193.svc.cluster.local\tcanonical name = externalsvc.services-3193.svc.cluster.local.\nName:\texternalsvc.services-3193.svc.cluster.local\nAddress: 10.233.25.238\n\n" - STEP: deleting ReplicationController externalsvc in namespace services-3193, will wait for the garbage collector to delete the pods 07/29/23 17:05:41.373 - Jul 29 17:05:41.447: INFO: Deleting ReplicationController externalsvc took: 14.376028ms - Jul 29 17:05:41.548: INFO: Terminating ReplicationController externalsvc pods took: 100.162513ms - Jul 29 17:05:43.891: INFO: Cleaning up the NodePort to ExternalName test service - [AfterEach] [sig-network] Services + [It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:109 + STEP: Creating configMap with name configmap-test-volume-map-20aa8bf0-1eb2-43d3-8958-717c63950716 08/24/23 13:14:01.471 + STEP: Creating a pod to test consume configMaps 08/24/23 13:14:01.48 + Aug 24 13:14:01.505: INFO: Waiting up to 5m0s for pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e" in namespace "configmap-2878" to be "Succeeded or Failed" + Aug 24 13:14:01.511: INFO: Pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e": Phase="Pending", Reason="", readiness=false. Elapsed: 5.818679ms + Aug 24 13:14:03.519: INFO: Pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01444033s + Aug 24 13:14:05.518: INFO: Pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.012996637s + STEP: Saw pod success 08/24/23 13:14:05.518 + Aug 24 13:14:05.518: INFO: Pod "pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e" satisfied condition "Succeeded or Failed" + Aug 24 13:14:05.528: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e container agnhost-container: + STEP: delete the pod 08/24/23 13:14:05.559 + Aug 24 13:14:05.585: INFO: Waiting for pod pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e to disappear + Aug 24 13:14:05.593: INFO: Pod pod-configmaps-d2ac5767-95ff-489c-9b6d-d4509021525e no longer exists + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 17:05:43.963: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-network] Services + Aug 24 13:14:05.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-network] Services + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "services-3193" for this suite. 07/29/23 17:05:43.975 + STEP: Destroying namespace "configmap-2878" for this suite. 08/24/23 13:14:05.6 << End Captured GinkgoWriter Output ------------------------------ -SSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Secrets - should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:68 -[BeforeEach] [sig-storage] Secrets +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate custom resource with pruning [Conformance] + test/e2e/apimachinery/webhook.go:341 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:05:43.992 -Jul 29 17:05:43.993: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename secrets 07/29/23 17:05:43.994 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:44.027 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:44.031 -[BeforeEach] [sig-storage] Secrets +STEP: Creating a kubernetes client 08/24/23 13:14:05.618 +Aug 24 13:14:05.619: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 13:14:05.621 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:14:05.643 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:14:05.649 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:68 -STEP: Creating secret with name secret-test-7057d575-3a45-4bda-a342-d18217df1d20 07/29/23 17:05:44.036 -STEP: Creating a pod to test consume secrets 07/29/23 17:05:44.046 -Jul 29 17:05:44.064: INFO: Waiting up to 5m0s for pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55" in namespace "secrets-3916" to be "Succeeded or Failed" -Jul 29 17:05:44.083: 
INFO: Pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55": Phase="Pending", Reason="", readiness=false. Elapsed: 19.181642ms -Jul 29 17:05:46.092: INFO: Pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02794416s -Jul 29 17:05:48.094: INFO: Pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.030426931s -STEP: Saw pod success 07/29/23 17:05:48.095 -Jul 29 17:05:48.095: INFO: Pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55" satisfied condition "Succeeded or Failed" -Jul 29 17:05:48.102: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55 container secret-volume-test: -STEP: delete the pod 07/29/23 17:05:48.117 -Jul 29 17:05:48.136: INFO: Waiting for pod pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55 to disappear -Jul 29 17:05:48.141: INFO: Pod pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55 no longer exists -[AfterEach] [sig-storage] Secrets +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 +STEP: Setting up server cert 08/24/23 13:14:05.677 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 13:14:07.051 +STEP: Deploying the webhook pod 08/24/23 13:14:07.062 +STEP: Wait for the deployment to be ready 08/24/23 13:14:07.099 +Aug 24 13:14:07.118: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service 08/24/23 13:14:09.142 +STEP: Verifying the service has paired with the endpoint 08/24/23 13:14:09.162 +Aug 24 13:14:10.163: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate custom resource with pruning [Conformance] + test/e2e/apimachinery/webhook.go:341 +Aug 24 13:14:10.173: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Registering the mutating webhook for custom resource e2e-test-webhook-4836-crds.webhook.example.com via the AdmissionRegistration API 08/24/23 13:14:10.694 +STEP: Creating a custom resource that should be mutated by the webhook 08/24/23 13:14:10.725 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 17:05:48.141: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Secrets +Aug 24 13:14:13.560: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Secrets +[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "secrets-3916" for this suite. 07/29/23 17:05:48.149 +STEP: Destroying namespace "webhook-1871" for this suite. 08/24/23 13:14:13.695 +STEP: Destroying namespace "webhook-1871-markers" for this suite. 
08/24/23 13:14:13.728 ------------------------------ -• [4.168 seconds] -[sig-storage] Secrets -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:68 +• [SLOW TEST] [8.130 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +test/e2e/apimachinery/framework.go:23 + should mutate custom resource with pruning [Conformance] + test/e2e/apimachinery/webhook.go:341 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Secrets + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:05:43.992 - Jul 29 17:05:43.993: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename secrets 07/29/23 17:05:43.994 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:44.027 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:44.031 - [BeforeEach] [sig-storage] Secrets + STEP: Creating a kubernetes client 08/24/23 13:14:05.618 + Aug 24 13:14:05.619: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 13:14:05.621 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:14:05.643 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:14:05.649 + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/secrets_volume.go:68 - STEP: Creating secret with name secret-test-7057d575-3a45-4bda-a342-d18217df1d20 07/29/23 17:05:44.036 - STEP: Creating a pod to test consume secrets 07/29/23 17:05:44.046 - Jul 29 17:05:44.064: INFO: Waiting up to 5m0s for pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55" in namespace "secrets-3916" to be "Succeeded or Failed" - Jul 29 17:05:44.083: INFO: Pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55": Phase="Pending", Reason="", readiness=false. Elapsed: 19.181642ms - Jul 29 17:05:46.092: INFO: Pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02794416s - Jul 29 17:05:48.094: INFO: Pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.030426931s - STEP: Saw pod success 07/29/23 17:05:48.095 - Jul 29 17:05:48.095: INFO: Pod "pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55" satisfied condition "Succeeded or Failed" - Jul 29 17:05:48.102: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55 container secret-volume-test: - STEP: delete the pod 07/29/23 17:05:48.117 - Jul 29 17:05:48.136: INFO: Waiting for pod pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55 to disappear - Jul 29 17:05:48.141: INFO: Pod pod-secrets-d74db004-7141-4ad8-bf63-835abc390f55 no longer exists - [AfterEach] [sig-storage] Secrets + [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:90 + STEP: Setting up server cert 08/24/23 13:14:05.677 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 13:14:07.051 + STEP: Deploying the webhook pod 08/24/23 13:14:07.062 + STEP: Wait for the deployment to be ready 08/24/23 13:14:07.099 + Aug 24 13:14:07.118: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set + STEP: Deploying the webhook service 08/24/23 13:14:09.142 + STEP: Verifying the service has paired with the endpoint 08/24/23 13:14:09.162 + Aug 24 13:14:10.163: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should mutate custom resource with pruning [Conformance] + test/e2e/apimachinery/webhook.go:341 + Aug 24 13:14:10.173: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Registering the mutating webhook for custom resource e2e-test-webhook-4836-crds.webhook.example.com via the AdmissionRegistration API 08/24/23 13:14:10.694 + STEP: Creating a custom resource that should be mutated by the webhook 08/24/23 13:14:10.725 + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 17:05:48.141: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Secrets + Aug 24 13:14:13.560: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + test/e2e/apimachinery/webhook.go:105 + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Secrets + [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "secrets-3916" for this suite. 07/29/23 17:05:48.149 + STEP: Destroying namespace "webhook-1871" for this suite. 08/24/23 13:14:13.695 + STEP: Destroying namespace "webhook-1871-markers" for this suite. 
08/24/23 13:14:13.728 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSS ------------------------------ -[sig-apps] Deployment - should validate Deployment Status endpoints [Conformance] - test/e2e/apps/deployment.go:479 -[BeforeEach] [sig-apps] Deployment +[sig-node] Probing container + with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:108 +[BeforeEach] [sig-node] Probing container set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:05:48.164 -Jul 29 17:05:48.164: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename deployment 07/29/23 17:05:48.166 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:48.196 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:48.202 -[BeforeEach] [sig-apps] Deployment +STEP: Creating a kubernetes client 08/24/23 13:14:13.777 +Aug 24 13:14:13.777: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename container-probe 08/24/23 13:14:13.78 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:14:13.883 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:14:13.886 +[BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 -[It] should validate Deployment Status endpoints [Conformance] - test/e2e/apps/deployment.go:479 -STEP: creating a Deployment 07/29/23 17:05:48.213 -Jul 29 17:05:48.213: INFO: Creating simple deployment test-deployment-nbcc5 -Jul 29 17:05:48.238: INFO: new replicaset for deployment "test-deployment-nbcc5" is yet to be created -STEP: Getting /status 07/29/23 17:05:50.267 -Jul 29 17:05:50.276: INFO: Deployment test-deployment-nbcc5 has Conditions: [{Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.}] -STEP: updating Deployment Status 07/29/23 17:05:50.276 -Jul 29 17:05:50.292: INFO: updatedStatus.Conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 5, 49, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 5, 49, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 5, 49, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 5, 48, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"test-deployment-nbcc5-54bc444df\" has successfully progressed."}, v1.DeploymentCondition{Type:"StatusUpdate", Status:"True", LastUpdateTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} -STEP: watching for the Deployment status to be updated 07/29/23 17:05:50.292 -Jul 29 17:05:50.296: INFO: Observed &Deployment event: ADDED -Jul 29 17:05:50.296: INFO: Observed Deployment test-deployment-nbcc5 in namespace 
deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-nbcc5-54bc444df"} -Jul 29 17:05:50.296: INFO: Observed &Deployment event: MODIFIED -Jul 29 17:05:50.297: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-nbcc5-54bc444df"} -Jul 29 17:05:50.297: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} -Jul 29 17:05:50.297: INFO: Observed &Deployment event: MODIFIED -Jul 29 17:05:50.297: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} -Jul 29 17:05:50.298: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-nbcc5-54bc444df" is progressing.} -Jul 29 17:05:50.298: INFO: Observed &Deployment event: MODIFIED -Jul 29 17:05:50.298: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} -Jul 29 17:05:50.299: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.} -Jul 29 17:05:50.299: INFO: Observed &Deployment event: MODIFIED -Jul 29 17:05:50.299: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} -Jul 29 17:05:50.299: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.} -Jul 29 17:05:50.299: INFO: Found Deployment test-deployment-nbcc5 in namespace deployment-5406 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} -Jul 29 17:05:50.299: INFO: Deployment test-deployment-nbcc5 has an updated status -STEP: patching the Statefulset 
Status 07/29/23 17:05:50.3 -Jul 29 17:05:50.300: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} -Jul 29 17:05:50.317: INFO: Patched status conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"StatusPatched", Status:"True", LastUpdateTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}} -STEP: watching for the Deployment status to be patched 07/29/23 17:05:50.318 -Jul 29 17:05:50.322: INFO: Observed &Deployment event: ADDED -Jul 29 17:05:50.322: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-nbcc5-54bc444df"} -Jul 29 17:05:50.323: INFO: Observed &Deployment event: MODIFIED -Jul 29 17:05:50.323: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-nbcc5-54bc444df"} -Jul 29 17:05:50.323: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} -Jul 29 17:05:50.323: INFO: Observed &Deployment event: MODIFIED -Jul 29 17:05:50.323: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} -Jul 29 17:05:50.324: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-nbcc5-54bc444df" is progressing.} -Jul 29 17:05:50.324: INFO: Observed &Deployment event: MODIFIED -Jul 29 17:05:50.324: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} -Jul 29 17:05:50.324: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.} -Jul 29 17:05:50.325: INFO: Observed &Deployment event: MODIFIED -Jul 29 17:05:50.325: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} -Jul 29 17:05:50.325: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with 
annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.} -Jul 29 17:05:50.325: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} -Jul 29 17:05:50.326: INFO: Observed &Deployment event: MODIFIED -Jul 29 17:05:50.326: INFO: Found deployment test-deployment-nbcc5 in namespace deployment-5406 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC } -Jul 29 17:05:50.326: INFO: Deployment test-deployment-nbcc5 has a patched status -[AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 -Jul 29 17:05:50.333: INFO: Deployment "test-deployment-nbcc5": -&Deployment{ObjectMeta:{test-deployment-nbcc5 deployment-5406 2e7fb8c0-2ac2-43e2-98de-c52abe540b4f 39547 1 2023-07-29 17:05:48 +0000 UTC map[e2e:testing name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 2023-07-29 17:05:48 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {e2e.test Update apps/v1 2023-07-29 17:05:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"StatusPatched\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update apps/v1 2023-07-29 17:05:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0047923a8 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:StatusPatched,Status:True,Reason:,Message:,LastUpdateTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:0001-01-01 00:00:00 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:FoundNewReplicaSet,Message:Found new replica set "test-deployment-nbcc5-54bc444df",LastUpdateTime:2023-07-29 17:05:50 +0000 UTC,LastTransitionTime:2023-07-29 17:05:50 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - -Jul 29 17:05:50.339: INFO: New ReplicaSet "test-deployment-nbcc5-54bc444df" of Deployment "test-deployment-nbcc5": -&ReplicaSet{ObjectMeta:{test-deployment-nbcc5-54bc444df deployment-5406 e55bd983-0dc1-41f4-90f5-0a476bb9b719 39542 1 2023-07-29 17:05:48 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment-nbcc5 2e7fb8c0-2ac2-43e2-98de-c52abe540b4f 0xc006aa66e0 0xc006aa66e1}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:05:48 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2e7fb8c0-2ac2-43e2-98de-c52abe540b4f\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:05:49 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,pod-template-hash: 54bc444df,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006aa6788 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} -Jul 29 17:05:50.346: INFO: Pod "test-deployment-nbcc5-54bc444df-8kfvf" is available: -&Pod{ObjectMeta:{test-deployment-nbcc5-54bc444df-8kfvf test-deployment-nbcc5-54bc444df- deployment-5406 e9430c10-f3f2-402d-847a-b0a67976a096 39541 0 2023-07-29 17:05:48 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[] [{apps/v1 ReplicaSet test-deployment-nbcc5-54bc444df e55bd983-0dc1-41f4-90f5-0a476bb9b719 0xc004792740 0xc004792741}] [] [{kube-controller-manager Update v1 2023-07-29 17:05:48 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e55bd983-0dc1-41f4-90f5-0a476bb9b719\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:05:49 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.6\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-dpqr7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dpqr7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:48 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:48 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.6,StartTime:2023-07-29 17:05:48 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:05:49 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://12492fe029b698dc7c8f1c7a11179a257a9c07f4cf3df5ace2dfd8a5b09cf7c0,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.6,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +[BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 +[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:108 +[AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 -Jul 29 17:05:50.347: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Deployment +Aug 24 13:15:13.924: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 -STEP: Destroying namespace "deployment-5406" for this suite. 07/29/23 17:05:50.358 +STEP: Destroying namespace "container-probe-6021" for this suite. 
08/24/23 13:15:13.934 ------------------------------ -• [2.213 seconds] -[sig-apps] Deployment -test/e2e/apps/framework.go:23 - should validate Deployment Status endpoints [Conformance] - test/e2e/apps/deployment.go:479 +• [SLOW TEST] [60.168 seconds] +[sig-node] Probing container +test/e2e/common/node/framework.go:23 + with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:108 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Deployment + [BeforeEach] [sig-node] Probing container set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:05:48.164 - Jul 29 17:05:48.164: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename deployment 07/29/23 17:05:48.166 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:48.196 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:48.202 - [BeforeEach] [sig-apps] Deployment + STEP: Creating a kubernetes client 08/24/23 13:14:13.777 + Aug 24 13:14:13.777: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename container-probe 08/24/23 13:14:13.78 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:14:13.883 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:14:13.886 + [BeforeEach] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 - [It] should validate Deployment Status endpoints [Conformance] - test/e2e/apps/deployment.go:479 - STEP: creating a Deployment 07/29/23 17:05:48.213 - Jul 29 17:05:48.213: INFO: Creating simple deployment test-deployment-nbcc5 - Jul 29 17:05:48.238: INFO: new replicaset for deployment "test-deployment-nbcc5" is yet to be created - STEP: Getting /status 07/29/23 17:05:50.267 - Jul 29 17:05:50.276: INFO: Deployment test-deployment-nbcc5 has Conditions: [{Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.}] - STEP: updating Deployment Status 07/29/23 17:05:50.276 - Jul 29 17:05:50.292: INFO: updatedStatus.Conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 5, 49, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 5, 49, 0, time.Local), Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2023, time.July, 29, 17, 5, 49, 0, time.Local), LastTransitionTime:time.Date(2023, time.July, 29, 17, 5, 48, 0, time.Local), Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"test-deployment-nbcc5-54bc444df\" has successfully progressed."}, v1.DeploymentCondition{Type:"StatusUpdate", Status:"True", LastUpdateTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"E2E", Message:"Set from e2e test"}} - STEP: watching for the Deployment status to be updated 07/29/23 17:05:50.292 - Jul 29 17:05:50.296: INFO: Observed &Deployment event: ADDED - Jul 29 
17:05:50.296: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-nbcc5-54bc444df"} - Jul 29 17:05:50.296: INFO: Observed &Deployment event: MODIFIED - Jul 29 17:05:50.297: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-nbcc5-54bc444df"} - Jul 29 17:05:50.297: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} - Jul 29 17:05:50.297: INFO: Observed &Deployment event: MODIFIED - Jul 29 17:05:50.297: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} - Jul 29 17:05:50.298: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-nbcc5-54bc444df" is progressing.} - Jul 29 17:05:50.298: INFO: Observed &Deployment event: MODIFIED - Jul 29 17:05:50.298: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} - Jul 29 17:05:50.299: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.} - Jul 29 17:05:50.299: INFO: Observed &Deployment event: MODIFIED - Jul 29 17:05:50.299: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} - Jul 29 17:05:50.299: INFO: Observed Deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.} - Jul 29 17:05:50.299: INFO: Found Deployment test-deployment-nbcc5 in namespace deployment-5406 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} - Jul 29 17:05:50.299: 
INFO: Deployment test-deployment-nbcc5 has an updated status - STEP: patching the Statefulset Status 07/29/23 17:05:50.3 - Jul 29 17:05:50.300: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} - Jul 29 17:05:50.317: INFO: Patched status conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"StatusPatched", Status:"True", LastUpdateTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Reason:"", Message:""}} - STEP: watching for the Deployment status to be patched 07/29/23 17:05:50.318 - Jul 29 17:05:50.322: INFO: Observed &Deployment event: ADDED - Jul 29 17:05:50.322: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-nbcc5-54bc444df"} - Jul 29 17:05:50.323: INFO: Observed &Deployment event: MODIFIED - Jul 29 17:05:50.323: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-nbcc5-54bc444df"} - Jul 29 17:05:50.323: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} - Jul 29 17:05:50.323: INFO: Observed &Deployment event: MODIFIED - Jul 29 17:05:50.323: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} - Jul 29 17:05:50.324: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:48 +0000 UTC 2023-07-29 17:05:48 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-nbcc5-54bc444df" is progressing.} - Jul 29 17:05:50.324: INFO: Observed &Deployment event: MODIFIED - Jul 29 17:05:50.324: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} - Jul 29 17:05:50.324: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.} - Jul 29 17:05:50.325: INFO: Observed &Deployment event: MODIFIED - Jul 29 17:05:50.325: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:49 +0000 UTC MinimumReplicasAvailable Deployment has minimum 
availability.} - Jul 29 17:05:50.325: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2023-07-29 17:05:49 +0000 UTC 2023-07-29 17:05:48 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-nbcc5-54bc444df" has successfully progressed.} - Jul 29 17:05:50.325: INFO: Observed deployment test-deployment-nbcc5 in namespace deployment-5406 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} - Jul 29 17:05:50.326: INFO: Observed &Deployment event: MODIFIED - Jul 29 17:05:50.326: INFO: Found deployment test-deployment-nbcc5 in namespace deployment-5406 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC } - Jul 29 17:05:50.326: INFO: Deployment test-deployment-nbcc5 has a patched status - [AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 - Jul 29 17:05:50.333: INFO: Deployment "test-deployment-nbcc5": - &Deployment{ObjectMeta:{test-deployment-nbcc5 deployment-5406 2e7fb8c0-2ac2-43e2-98de-c52abe540b4f 39547 1 2023-07-29 17:05:48 +0000 UTC map[e2e:testing name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 2023-07-29 17:05:48 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {e2e.test Update apps/v1 2023-07-29 17:05:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"StatusPatched\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update apps/v1 2023-07-29 17:05:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0047923a8 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:StatusPatched,Status:True,Reason:,Message:,LastUpdateTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:0001-01-01 00:00:00 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:FoundNewReplicaSet,Message:Found new replica set "test-deployment-nbcc5-54bc444df",LastUpdateTime:2023-07-29 17:05:50 +0000 UTC,LastTransitionTime:2023-07-29 17:05:50 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - - Jul 29 17:05:50.339: INFO: New ReplicaSet "test-deployment-nbcc5-54bc444df" of Deployment "test-deployment-nbcc5": - &ReplicaSet{ObjectMeta:{test-deployment-nbcc5-54bc444df deployment-5406 e55bd983-0dc1-41f4-90f5-0a476bb9b719 39542 1 2023-07-29 17:05:48 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment-nbcc5 2e7fb8c0-2ac2-43e2-98de-c52abe540b4f 0xc006aa66e0 0xc006aa66e1}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:05:48 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"2e7fb8c0-2ac2-43e2-98de-c52abe540b4f\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:05:49 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,pod-template-hash: 54bc444df,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc006aa6788 ClusterFirst map[] false false false 
&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} - Jul 29 17:05:50.346: INFO: Pod "test-deployment-nbcc5-54bc444df-8kfvf" is available: - &Pod{ObjectMeta:{test-deployment-nbcc5-54bc444df-8kfvf test-deployment-nbcc5-54bc444df- deployment-5406 e9430c10-f3f2-402d-847a-b0a67976a096 39541 0 2023-07-29 17:05:48 +0000 UTC map[e2e:testing name:httpd pod-template-hash:54bc444df] map[] [{apps/v1 ReplicaSet test-deployment-nbcc5-54bc444df e55bd983-0dc1-41f4-90f5-0a476bb9b719 0xc004792740 0xc004792741}] [] [{kube-controller-manager Update v1 2023-07-29 17:05:48 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e55bd983-0dc1-41f4-90f5-0a476bb9b719\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:05:49 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.6\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-dpqr7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dpqr7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{}
,ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:48 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:48 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.6,StartTime:2023-07-29 17:05:48 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:05:49 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://12492fe029b698dc7c8f1c7a11179a257a9c07f4cf3df5ace2dfd8a5b09cf7c0,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.6,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - [AfterEach] [sig-apps] Deployment + [BeforeEach] [sig-node] Probing container + test/e2e/common/node/container_probe.go:63 + [It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + test/e2e/common/node/container_probe.go:108 + [AfterEach] [sig-node] Probing container test/e2e/framework/node/init/init.go:32 - Jul 29 17:05:50.347: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Deployment + Aug 24 13:15:13.924: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Probing container test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-node] Probing container dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-node] Probing container tear down framework | framework.go:193 - STEP: Destroying namespace "deployment-5406" for this suite. 07/29/23 17:05:50.358 + STEP: Destroying namespace "container-probe-6021" for this suite. 
08/24/23 13:15:13.934 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-instrumentation] Events - should manage the lifecycle of an event [Conformance] - test/e2e/instrumentation/core_events.go:57 -[BeforeEach] [sig-instrumentation] Events +[sig-api-machinery] Discovery + should validate PreferredVersion for each APIGroup [Conformance] + test/e2e/apimachinery/discovery.go:122 +[BeforeEach] [sig-api-machinery] Discovery set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:05:50.386 -Jul 29 17:05:50.386: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename events 07/29/23 17:05:50.388 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:50.426 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:50.432 -[BeforeEach] [sig-instrumentation] Events +STEP: Creating a kubernetes client 08/24/23 13:15:13.956 +Aug 24 13:15:13.956: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename discovery 08/24/23 13:15:13.959 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:13.997 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:14.002 +[BeforeEach] [sig-api-machinery] Discovery test/e2e/framework/metrics/init/init.go:31 -[It] should manage the lifecycle of an event [Conformance] - test/e2e/instrumentation/core_events.go:57 -STEP: creating a test event 07/29/23 17:05:50.438 -STEP: listing all events in all namespaces 07/29/23 17:05:50.445 -STEP: patching the test event 07/29/23 17:05:50.452 -STEP: fetching the test event 07/29/23 17:05:50.466 -STEP: updating the test event 07/29/23 17:05:50.473 -STEP: getting the test event 07/29/23 17:05:50.494 -STEP: deleting the test event 07/29/23 17:05:50.502 -STEP: listing all events in all namespaces 07/29/23 17:05:50.526 -[AfterEach] [sig-instrumentation] Events +[BeforeEach] [sig-api-machinery] Discovery + test/e2e/apimachinery/discovery.go:43 +STEP: Setting up server cert 08/24/23 13:15:14.008 +[It] should validate PreferredVersion for each APIGroup [Conformance] + test/e2e/apimachinery/discovery.go:122 +Aug 24 13:15:14.906: INFO: Checking APIGroup: apiregistration.k8s.io +Aug 24 13:15:14.909: INFO: PreferredVersion.GroupVersion: apiregistration.k8s.io/v1 +Aug 24 13:15:14.909: INFO: Versions found [{apiregistration.k8s.io/v1 v1}] +Aug 24 13:15:14.909: INFO: apiregistration.k8s.io/v1 matches apiregistration.k8s.io/v1 +Aug 24 13:15:14.909: INFO: Checking APIGroup: apps +Aug 24 13:15:14.911: INFO: PreferredVersion.GroupVersion: apps/v1 +Aug 24 13:15:14.911: INFO: Versions found [{apps/v1 v1}] +Aug 24 13:15:14.911: INFO: apps/v1 matches apps/v1 +Aug 24 13:15:14.911: INFO: Checking APIGroup: events.k8s.io +Aug 24 13:15:14.913: INFO: PreferredVersion.GroupVersion: events.k8s.io/v1 +Aug 24 13:15:14.913: INFO: Versions found [{events.k8s.io/v1 v1}] +Aug 24 13:15:14.913: INFO: events.k8s.io/v1 matches events.k8s.io/v1 +Aug 24 13:15:14.913: INFO: Checking APIGroup: authentication.k8s.io +Aug 24 13:15:14.915: INFO: PreferredVersion.GroupVersion: authentication.k8s.io/v1 +Aug 24 13:15:14.915: INFO: Versions found [{authentication.k8s.io/v1 v1}] +Aug 24 13:15:14.915: INFO: authentication.k8s.io/v1 matches authentication.k8s.io/v1 +Aug 24 13:15:14.915: INFO: Checking APIGroup: authorization.k8s.io +Aug 24 
13:15:14.917: INFO: PreferredVersion.GroupVersion: authorization.k8s.io/v1 +Aug 24 13:15:14.917: INFO: Versions found [{authorization.k8s.io/v1 v1}] +Aug 24 13:15:14.917: INFO: authorization.k8s.io/v1 matches authorization.k8s.io/v1 +Aug 24 13:15:14.917: INFO: Checking APIGroup: autoscaling +Aug 24 13:15:14.919: INFO: PreferredVersion.GroupVersion: autoscaling/v2 +Aug 24 13:15:14.919: INFO: Versions found [{autoscaling/v2 v2} {autoscaling/v1 v1}] +Aug 24 13:15:14.919: INFO: autoscaling/v2 matches autoscaling/v2 +Aug 24 13:15:14.919: INFO: Checking APIGroup: batch +Aug 24 13:15:14.921: INFO: PreferredVersion.GroupVersion: batch/v1 +Aug 24 13:15:14.921: INFO: Versions found [{batch/v1 v1}] +Aug 24 13:15:14.921: INFO: batch/v1 matches batch/v1 +Aug 24 13:15:14.921: INFO: Checking APIGroup: certificates.k8s.io +Aug 24 13:15:14.922: INFO: PreferredVersion.GroupVersion: certificates.k8s.io/v1 +Aug 24 13:15:14.922: INFO: Versions found [{certificates.k8s.io/v1 v1}] +Aug 24 13:15:14.923: INFO: certificates.k8s.io/v1 matches certificates.k8s.io/v1 +Aug 24 13:15:14.923: INFO: Checking APIGroup: networking.k8s.io +Aug 24 13:15:14.924: INFO: PreferredVersion.GroupVersion: networking.k8s.io/v1 +Aug 24 13:15:14.924: INFO: Versions found [{networking.k8s.io/v1 v1}] +Aug 24 13:15:14.924: INFO: networking.k8s.io/v1 matches networking.k8s.io/v1 +Aug 24 13:15:14.924: INFO: Checking APIGroup: policy +Aug 24 13:15:14.926: INFO: PreferredVersion.GroupVersion: policy/v1 +Aug 24 13:15:14.926: INFO: Versions found [{policy/v1 v1}] +Aug 24 13:15:14.926: INFO: policy/v1 matches policy/v1 +Aug 24 13:15:14.926: INFO: Checking APIGroup: rbac.authorization.k8s.io +Aug 24 13:15:14.928: INFO: PreferredVersion.GroupVersion: rbac.authorization.k8s.io/v1 +Aug 24 13:15:14.928: INFO: Versions found [{rbac.authorization.k8s.io/v1 v1}] +Aug 24 13:15:14.928: INFO: rbac.authorization.k8s.io/v1 matches rbac.authorization.k8s.io/v1 +Aug 24 13:15:14.928: INFO: Checking APIGroup: storage.k8s.io +Aug 24 13:15:14.929: INFO: PreferredVersion.GroupVersion: storage.k8s.io/v1 +Aug 24 13:15:14.929: INFO: Versions found [{storage.k8s.io/v1 v1} {storage.k8s.io/v1beta1 v1beta1}] +Aug 24 13:15:14.929: INFO: storage.k8s.io/v1 matches storage.k8s.io/v1 +Aug 24 13:15:14.930: INFO: Checking APIGroup: admissionregistration.k8s.io +Aug 24 13:15:14.931: INFO: PreferredVersion.GroupVersion: admissionregistration.k8s.io/v1 +Aug 24 13:15:14.931: INFO: Versions found [{admissionregistration.k8s.io/v1 v1}] +Aug 24 13:15:14.931: INFO: admissionregistration.k8s.io/v1 matches admissionregistration.k8s.io/v1 +Aug 24 13:15:14.931: INFO: Checking APIGroup: apiextensions.k8s.io +Aug 24 13:15:14.933: INFO: PreferredVersion.GroupVersion: apiextensions.k8s.io/v1 +Aug 24 13:15:14.933: INFO: Versions found [{apiextensions.k8s.io/v1 v1}] +Aug 24 13:15:14.933: INFO: apiextensions.k8s.io/v1 matches apiextensions.k8s.io/v1 +Aug 24 13:15:14.934: INFO: Checking APIGroup: scheduling.k8s.io +Aug 24 13:15:14.935: INFO: PreferredVersion.GroupVersion: scheduling.k8s.io/v1 +Aug 24 13:15:14.935: INFO: Versions found [{scheduling.k8s.io/v1 v1}] +Aug 24 13:15:14.935: INFO: scheduling.k8s.io/v1 matches scheduling.k8s.io/v1 +Aug 24 13:15:14.935: INFO: Checking APIGroup: coordination.k8s.io +Aug 24 13:15:14.937: INFO: PreferredVersion.GroupVersion: coordination.k8s.io/v1 +Aug 24 13:15:14.937: INFO: Versions found [{coordination.k8s.io/v1 v1}] +Aug 24 13:15:14.937: INFO: coordination.k8s.io/v1 matches coordination.k8s.io/v1 +Aug 24 13:15:14.937: INFO: Checking APIGroup: node.k8s.io +Aug 
24 13:15:14.939: INFO: PreferredVersion.GroupVersion: node.k8s.io/v1 +Aug 24 13:15:14.939: INFO: Versions found [{node.k8s.io/v1 v1}] +Aug 24 13:15:14.939: INFO: node.k8s.io/v1 matches node.k8s.io/v1 +Aug 24 13:15:14.939: INFO: Checking APIGroup: discovery.k8s.io +Aug 24 13:15:14.942: INFO: PreferredVersion.GroupVersion: discovery.k8s.io/v1 +Aug 24 13:15:14.942: INFO: Versions found [{discovery.k8s.io/v1 v1}] +Aug 24 13:15:14.942: INFO: discovery.k8s.io/v1 matches discovery.k8s.io/v1 +Aug 24 13:15:14.942: INFO: Checking APIGroup: flowcontrol.apiserver.k8s.io +Aug 24 13:15:14.945: INFO: PreferredVersion.GroupVersion: flowcontrol.apiserver.k8s.io/v1beta3 +Aug 24 13:15:14.945: INFO: Versions found [{flowcontrol.apiserver.k8s.io/v1beta3 v1beta3} {flowcontrol.apiserver.k8s.io/v1beta2 v1beta2}] +Aug 24 13:15:14.945: INFO: flowcontrol.apiserver.k8s.io/v1beta3 matches flowcontrol.apiserver.k8s.io/v1beta3 +Aug 24 13:15:14.945: INFO: Checking APIGroup: cilium.io +Aug 24 13:15:14.946: INFO: PreferredVersion.GroupVersion: cilium.io/v2 +Aug 24 13:15:14.947: INFO: Versions found [{cilium.io/v2 v2} {cilium.io/v2alpha1 v2alpha1}] +Aug 24 13:15:14.947: INFO: cilium.io/v2 matches cilium.io/v2 +[AfterEach] [sig-api-machinery] Discovery test/e2e/framework/node/init/init.go:32 -Jul 29 17:05:50.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-instrumentation] Events +Aug 24 13:15:14.947: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-api-machinery] Discovery test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-instrumentation] Events +[DeferCleanup (Each)] [sig-api-machinery] Discovery dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-instrumentation] Events +[DeferCleanup (Each)] [sig-api-machinery] Discovery tear down framework | framework.go:193 -STEP: Destroying namespace "events-7888" for this suite. 07/29/23 17:05:50.543 +STEP: Destroying namespace "discovery-3080" for this suite. 
08/24/23 13:15:14.956 ------------------------------ -• [0.170 seconds] -[sig-instrumentation] Events -test/e2e/instrumentation/common/framework.go:23 - should manage the lifecycle of an event [Conformance] - test/e2e/instrumentation/core_events.go:57 +• [1.013 seconds] +[sig-api-machinery] Discovery +test/e2e/apimachinery/framework.go:23 + should validate PreferredVersion for each APIGroup [Conformance] + test/e2e/apimachinery/discovery.go:122 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-instrumentation] Events + [BeforeEach] [sig-api-machinery] Discovery set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:05:50.386 - Jul 29 17:05:50.386: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename events 07/29/23 17:05:50.388 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:50.426 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:50.432 - [BeforeEach] [sig-instrumentation] Events + STEP: Creating a kubernetes client 08/24/23 13:15:13.956 + Aug 24 13:15:13.956: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename discovery 08/24/23 13:15:13.959 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:13.997 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:14.002 + [BeforeEach] [sig-api-machinery] Discovery test/e2e/framework/metrics/init/init.go:31 - [It] should manage the lifecycle of an event [Conformance] - test/e2e/instrumentation/core_events.go:57 - STEP: creating a test event 07/29/23 17:05:50.438 - STEP: listing all events in all namespaces 07/29/23 17:05:50.445 - STEP: patching the test event 07/29/23 17:05:50.452 - STEP: fetching the test event 07/29/23 17:05:50.466 - STEP: updating the test event 07/29/23 17:05:50.473 - STEP: getting the test event 07/29/23 17:05:50.494 - STEP: deleting the test event 07/29/23 17:05:50.502 - STEP: listing all events in all namespaces 07/29/23 17:05:50.526 - [AfterEach] [sig-instrumentation] Events + [BeforeEach] [sig-api-machinery] Discovery + test/e2e/apimachinery/discovery.go:43 + STEP: Setting up server cert 08/24/23 13:15:14.008 + [It] should validate PreferredVersion for each APIGroup [Conformance] + test/e2e/apimachinery/discovery.go:122 + Aug 24 13:15:14.906: INFO: Checking APIGroup: apiregistration.k8s.io + Aug 24 13:15:14.909: INFO: PreferredVersion.GroupVersion: apiregistration.k8s.io/v1 + Aug 24 13:15:14.909: INFO: Versions found [{apiregistration.k8s.io/v1 v1}] + Aug 24 13:15:14.909: INFO: apiregistration.k8s.io/v1 matches apiregistration.k8s.io/v1 + Aug 24 13:15:14.909: INFO: Checking APIGroup: apps + Aug 24 13:15:14.911: INFO: PreferredVersion.GroupVersion: apps/v1 + Aug 24 13:15:14.911: INFO: Versions found [{apps/v1 v1}] + Aug 24 13:15:14.911: INFO: apps/v1 matches apps/v1 + Aug 24 13:15:14.911: INFO: Checking APIGroup: events.k8s.io + Aug 24 13:15:14.913: INFO: PreferredVersion.GroupVersion: events.k8s.io/v1 + Aug 24 13:15:14.913: INFO: Versions found [{events.k8s.io/v1 v1}] + Aug 24 13:15:14.913: INFO: events.k8s.io/v1 matches events.k8s.io/v1 + Aug 24 13:15:14.913: INFO: Checking APIGroup: authentication.k8s.io + Aug 24 13:15:14.915: INFO: PreferredVersion.GroupVersion: authentication.k8s.io/v1 + Aug 24 13:15:14.915: INFO: Versions found [{authentication.k8s.io/v1 v1}] + Aug 24 13:15:14.915: INFO: authentication.k8s.io/v1 matches authentication.k8s.io/v1 + 
Aug 24 13:15:14.915: INFO: Checking APIGroup: authorization.k8s.io + Aug 24 13:15:14.917: INFO: PreferredVersion.GroupVersion: authorization.k8s.io/v1 + Aug 24 13:15:14.917: INFO: Versions found [{authorization.k8s.io/v1 v1}] + Aug 24 13:15:14.917: INFO: authorization.k8s.io/v1 matches authorization.k8s.io/v1 + Aug 24 13:15:14.917: INFO: Checking APIGroup: autoscaling + Aug 24 13:15:14.919: INFO: PreferredVersion.GroupVersion: autoscaling/v2 + Aug 24 13:15:14.919: INFO: Versions found [{autoscaling/v2 v2} {autoscaling/v1 v1}] + Aug 24 13:15:14.919: INFO: autoscaling/v2 matches autoscaling/v2 + Aug 24 13:15:14.919: INFO: Checking APIGroup: batch + Aug 24 13:15:14.921: INFO: PreferredVersion.GroupVersion: batch/v1 + Aug 24 13:15:14.921: INFO: Versions found [{batch/v1 v1}] + Aug 24 13:15:14.921: INFO: batch/v1 matches batch/v1 + Aug 24 13:15:14.921: INFO: Checking APIGroup: certificates.k8s.io + Aug 24 13:15:14.922: INFO: PreferredVersion.GroupVersion: certificates.k8s.io/v1 + Aug 24 13:15:14.922: INFO: Versions found [{certificates.k8s.io/v1 v1}] + Aug 24 13:15:14.923: INFO: certificates.k8s.io/v1 matches certificates.k8s.io/v1 + Aug 24 13:15:14.923: INFO: Checking APIGroup: networking.k8s.io + Aug 24 13:15:14.924: INFO: PreferredVersion.GroupVersion: networking.k8s.io/v1 + Aug 24 13:15:14.924: INFO: Versions found [{networking.k8s.io/v1 v1}] + Aug 24 13:15:14.924: INFO: networking.k8s.io/v1 matches networking.k8s.io/v1 + Aug 24 13:15:14.924: INFO: Checking APIGroup: policy + Aug 24 13:15:14.926: INFO: PreferredVersion.GroupVersion: policy/v1 + Aug 24 13:15:14.926: INFO: Versions found [{policy/v1 v1}] + Aug 24 13:15:14.926: INFO: policy/v1 matches policy/v1 + Aug 24 13:15:14.926: INFO: Checking APIGroup: rbac.authorization.k8s.io + Aug 24 13:15:14.928: INFO: PreferredVersion.GroupVersion: rbac.authorization.k8s.io/v1 + Aug 24 13:15:14.928: INFO: Versions found [{rbac.authorization.k8s.io/v1 v1}] + Aug 24 13:15:14.928: INFO: rbac.authorization.k8s.io/v1 matches rbac.authorization.k8s.io/v1 + Aug 24 13:15:14.928: INFO: Checking APIGroup: storage.k8s.io + Aug 24 13:15:14.929: INFO: PreferredVersion.GroupVersion: storage.k8s.io/v1 + Aug 24 13:15:14.929: INFO: Versions found [{storage.k8s.io/v1 v1} {storage.k8s.io/v1beta1 v1beta1}] + Aug 24 13:15:14.929: INFO: storage.k8s.io/v1 matches storage.k8s.io/v1 + Aug 24 13:15:14.930: INFO: Checking APIGroup: admissionregistration.k8s.io + Aug 24 13:15:14.931: INFO: PreferredVersion.GroupVersion: admissionregistration.k8s.io/v1 + Aug 24 13:15:14.931: INFO: Versions found [{admissionregistration.k8s.io/v1 v1}] + Aug 24 13:15:14.931: INFO: admissionregistration.k8s.io/v1 matches admissionregistration.k8s.io/v1 + Aug 24 13:15:14.931: INFO: Checking APIGroup: apiextensions.k8s.io + Aug 24 13:15:14.933: INFO: PreferredVersion.GroupVersion: apiextensions.k8s.io/v1 + Aug 24 13:15:14.933: INFO: Versions found [{apiextensions.k8s.io/v1 v1}] + Aug 24 13:15:14.933: INFO: apiextensions.k8s.io/v1 matches apiextensions.k8s.io/v1 + Aug 24 13:15:14.934: INFO: Checking APIGroup: scheduling.k8s.io + Aug 24 13:15:14.935: INFO: PreferredVersion.GroupVersion: scheduling.k8s.io/v1 + Aug 24 13:15:14.935: INFO: Versions found [{scheduling.k8s.io/v1 v1}] + Aug 24 13:15:14.935: INFO: scheduling.k8s.io/v1 matches scheduling.k8s.io/v1 + Aug 24 13:15:14.935: INFO: Checking APIGroup: coordination.k8s.io + Aug 24 13:15:14.937: INFO: PreferredVersion.GroupVersion: coordination.k8s.io/v1 + Aug 24 13:15:14.937: INFO: Versions found [{coordination.k8s.io/v1 v1}] + Aug 24 13:15:14.937: 
INFO: coordination.k8s.io/v1 matches coordination.k8s.io/v1 + Aug 24 13:15:14.937: INFO: Checking APIGroup: node.k8s.io + Aug 24 13:15:14.939: INFO: PreferredVersion.GroupVersion: node.k8s.io/v1 + Aug 24 13:15:14.939: INFO: Versions found [{node.k8s.io/v1 v1}] + Aug 24 13:15:14.939: INFO: node.k8s.io/v1 matches node.k8s.io/v1 + Aug 24 13:15:14.939: INFO: Checking APIGroup: discovery.k8s.io + Aug 24 13:15:14.942: INFO: PreferredVersion.GroupVersion: discovery.k8s.io/v1 + Aug 24 13:15:14.942: INFO: Versions found [{discovery.k8s.io/v1 v1}] + Aug 24 13:15:14.942: INFO: discovery.k8s.io/v1 matches discovery.k8s.io/v1 + Aug 24 13:15:14.942: INFO: Checking APIGroup: flowcontrol.apiserver.k8s.io + Aug 24 13:15:14.945: INFO: PreferredVersion.GroupVersion: flowcontrol.apiserver.k8s.io/v1beta3 + Aug 24 13:15:14.945: INFO: Versions found [{flowcontrol.apiserver.k8s.io/v1beta3 v1beta3} {flowcontrol.apiserver.k8s.io/v1beta2 v1beta2}] + Aug 24 13:15:14.945: INFO: flowcontrol.apiserver.k8s.io/v1beta3 matches flowcontrol.apiserver.k8s.io/v1beta3 + Aug 24 13:15:14.945: INFO: Checking APIGroup: cilium.io + Aug 24 13:15:14.946: INFO: PreferredVersion.GroupVersion: cilium.io/v2 + Aug 24 13:15:14.947: INFO: Versions found [{cilium.io/v2 v2} {cilium.io/v2alpha1 v2alpha1}] + Aug 24 13:15:14.947: INFO: cilium.io/v2 matches cilium.io/v2 + [AfterEach] [sig-api-machinery] Discovery test/e2e/framework/node/init/init.go:32 - Jul 29 17:05:50.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-instrumentation] Events + Aug 24 13:15:14.947: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-api-machinery] Discovery test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-instrumentation] Events + [DeferCleanup (Each)] [sig-api-machinery] Discovery dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-instrumentation] Events + [DeferCleanup (Each)] [sig-api-machinery] Discovery tear down framework | framework.go:193 - STEP: Destroying namespace "events-7888" for this suite. 07/29/23 17:05:50.543 + STEP: Destroying namespace "discovery-3080" for this suite. 
08/24/23 13:15:14.956 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSS +SSSSSSSSS ------------------------------ -[sig-node] Pods - should be submitted and removed [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:226 -[BeforeEach] [sig-node] Pods +[sig-scheduling] SchedulerPredicates [Serial] + validates that NodeSelector is respected if matching [Conformance] + test/e2e/scheduling/predicates.go:466 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:05:50.563 -Jul 29 17:05:50.563: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename pods 07/29/23 17:05:50.565 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:50.596 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:50.601 -[BeforeEach] [sig-node] Pods +STEP: Creating a kubernetes client 08/24/23 13:15:14.973 +Aug 24 13:15:14.973: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename sched-pred 08/24/23 13:15:14.974 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:15.01 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:15.015 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 -[It] should be submitted and removed [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:226 -STEP: creating the pod 07/29/23 17:05:50.608 -STEP: setting up watch 07/29/23 17:05:50.608 -STEP: submitting the pod to kubernetes 07/29/23 17:05:50.716 -STEP: verifying the pod is in kubernetes 07/29/23 17:05:50.734 -STEP: verifying pod creation was observed 07/29/23 17:05:50.745 -Jul 29 17:05:50.745: INFO: Waiting up to 5m0s for pod "pod-submit-remove-5a9f9e99-f8c2-4e2a-8747-2734116ed9d7" in namespace "pods-2220" to be "running" -Jul 29 17:05:50.757: INFO: Pod "pod-submit-remove-5a9f9e99-f8c2-4e2a-8747-2734116ed9d7": Phase="Pending", Reason="", readiness=false. Elapsed: 12.001103ms -Jul 29 17:05:52.765: INFO: Pod "pod-submit-remove-5a9f9e99-f8c2-4e2a-8747-2734116ed9d7": Phase="Running", Reason="", readiness=true. Elapsed: 2.020242795s -Jul 29 17:05:52.765: INFO: Pod "pod-submit-remove-5a9f9e99-f8c2-4e2a-8747-2734116ed9d7" satisfied condition "running" -STEP: deleting the pod gracefully 07/29/23 17:05:52.772 -STEP: verifying pod deletion was observed 07/29/23 17:05:52.785 -[AfterEach] [sig-node] Pods +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/scheduling/predicates.go:97 +Aug 24 13:15:15.021: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Aug 24 13:15:15.040: INFO: Waiting for terminating namespaces to be deleted... 
+Aug 24 13:15:15.082: INFO: +Logging pods the apiserver thinks is on node pe9deep4seen-1 before test +Aug 24 13:15:15.104: INFO: cilium-node-init-wqpdx from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.105: INFO: Container node-init ready: true, restart count 0 +Aug 24 13:15:15.105: INFO: cilium-wpzgb from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.105: INFO: Container cilium-agent ready: true, restart count 0 +Aug 24 13:15:15.105: INFO: coredns-787d4945fb-8jnm5 from kube-system started at 2023-08-24 11:24:04 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.105: INFO: Container coredns ready: true, restart count 0 +Aug 24 13:15:15.105: INFO: coredns-787d4945fb-d76z6 from kube-system started at 2023-08-24 11:24:07 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.105: INFO: Container coredns ready: true, restart count 0 +Aug 24 13:15:15.106: INFO: kube-addon-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.106: INFO: Container kube-addon-manager ready: true, restart count 0 +Aug 24 13:15:15.106: INFO: kube-apiserver-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.106: INFO: Container kube-apiserver ready: true, restart count 0 +Aug 24 13:15:15.106: INFO: kube-controller-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.106: INFO: Container kube-controller-manager ready: true, restart count 0 +Aug 24 13:15:15.106: INFO: kube-proxy-nr5bs from kube-system started at 2023-08-24 11:21:24 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.106: INFO: Container kube-proxy ready: true, restart count 0 +Aug 24 13:15:15.106: INFO: kube-scheduler-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.106: INFO: Container kube-scheduler ready: true, restart count 0 +Aug 24 13:15:15.107: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) +Aug 24 13:15:15.107: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 13:15:15.107: INFO: Container systemd-logs ready: true, restart count 0 +Aug 24 13:15:15.107: INFO: +Logging pods the apiserver thinks is on node pe9deep4seen-2 before test +Aug 24 13:15:15.124: INFO: cilium-node-init-95cbk from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.124: INFO: Container node-init ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: cilium-operator-75f7897945-8qqz2 from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.124: INFO: Container cilium-operator ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: cilium-rcknz from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.124: INFO: Container cilium-agent ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: kube-addon-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:37 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.124: INFO: Container kube-addon-manager ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: kube-apiserver-pe9deep4seen-2 from kube-system started at 
2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.124: INFO: Container kube-apiserver ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: kube-controller-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.124: INFO: Container kube-controller-manager ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: kube-proxy-lm2dm from kube-system started at 2023-08-24 11:22:03 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.124: INFO: Container kube-proxy ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: kube-scheduler-pe9deep4seen-2 from kube-system started at 2023-08-24 11:25:19 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.124: INFO: Container kube-scheduler ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) +Aug 24 13:15:15.124: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: Container systemd-logs ready: true, restart count 0 +Aug 24 13:15:15.124: INFO: +Logging pods the apiserver thinks is on node pe9deep4seen-3 before test +Aug 24 13:15:15.139: INFO: test-webserver-49e04467-5347-46a7-92e9-074529e73558 from container-probe-6021 started at 2023-08-24 13:14:13 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.139: INFO: Container test-webserver ready: false, restart count 0 +Aug 24 13:15:15.139: INFO: cilium-node-init-pdcw9 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.140: INFO: Container node-init ready: true, restart count 0 +Aug 24 13:15:15.140: INFO: cilium-xgc44 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.140: INFO: Container cilium-agent ready: true, restart count 0 +Aug 24 13:15:15.140: INFO: kube-proxy-8vv8d from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.140: INFO: Container kube-proxy ready: true, restart count 0 +Aug 24 13:15:15.140: INFO: sonobuoy from sonobuoy started at 2023-08-24 11:38:19 +0000 UTC (1 container statuses recorded) +Aug 24 13:15:15.140: INFO: Container kube-sonobuoy ready: true, restart count 0 +Aug 24 13:15:15.141: INFO: sonobuoy-e2e-job-b3f52dde3e8a4a4e from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) +Aug 24 13:15:15.141: INFO: Container e2e ready: true, restart count 0 +Aug 24 13:15:15.141: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 13:15:15.141: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) +Aug 24 13:15:15.141: INFO: Container sonobuoy-worker ready: true, restart count 0 +Aug 24 13:15:15.142: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that NodeSelector is respected if matching [Conformance] + test/e2e/scheduling/predicates.go:466 +STEP: Trying to launch a pod without a label to get a node which can launch it. 08/24/23 13:15:15.142 +Aug 24 13:15:15.160: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-pred-4812" to be "running" +Aug 24 13:15:15.169: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.858024ms +Aug 24 13:15:17.180: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.020109214s +Aug 24 13:15:17.181: INFO: Pod "without-label" satisfied condition "running" +STEP: Explicitly delete pod here to free the resource it takes. 08/24/23 13:15:17.19 +STEP: Trying to apply a random label on the found node. 08/24/23 13:15:17.218 +STEP: verifying the node has the label kubernetes.io/e2e-2a7e3586-9103-4828-8200-3c8df6e49f6c 42 08/24/23 13:15:17.252 +STEP: Trying to relaunch the pod, now with labels. 08/24/23 13:15:17.276 +Aug 24 13:15:17.296: INFO: Waiting up to 5m0s for pod "with-labels" in namespace "sched-pred-4812" to be "not pending" +Aug 24 13:15:17.306: INFO: Pod "with-labels": Phase="Pending", Reason="", readiness=false. Elapsed: 10.516011ms +Aug 24 13:15:19.323: INFO: Pod "with-labels": Phase="Running", Reason="", readiness=true. Elapsed: 2.027023759s +Aug 24 13:15:19.323: INFO: Pod "with-labels" satisfied condition "not pending" +STEP: removing the label kubernetes.io/e2e-2a7e3586-9103-4828-8200-3c8df6e49f6c off the node pe9deep4seen-3 08/24/23 13:15:19.338 +STEP: verifying the node doesn't have the label kubernetes.io/e2e-2a7e3586-9103-4828-8200-3c8df6e49f6c 08/24/23 13:15:19.37 +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 17:05:54.995: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Pods +Aug 24 13:15:19.402: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/scheduling/predicates.go:88 +[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Pods +[DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "pods-2220" for this suite. 07/29/23 17:05:55.009 +STEP: Destroying namespace "sched-pred-4812" for this suite. 
08/24/23 13:15:19.414 ------------------------------ -• [4.458 seconds] -[sig-node] Pods -test/e2e/common/node/framework.go:23 - should be submitted and removed [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:226 +• [4.469 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +test/e2e/scheduling/framework.go:40 + validates that NodeSelector is respected if matching [Conformance] + test/e2e/scheduling/predicates.go:466 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Pods + [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:05:50.563 - Jul 29 17:05:50.563: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename pods 07/29/23 17:05:50.565 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:50.596 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:50.601 - [BeforeEach] [sig-node] Pods + STEP: Creating a kubernetes client 08/24/23 13:15:14.973 + Aug 24 13:15:14.973: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename sched-pred 08/24/23 13:15:14.974 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:15.01 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:15.015 + [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] Pods - test/e2e/common/node/pods.go:194 - [It] should be submitted and removed [NodeConformance] [Conformance] - test/e2e/common/node/pods.go:226 - STEP: creating the pod 07/29/23 17:05:50.608 - STEP: setting up watch 07/29/23 17:05:50.608 - STEP: submitting the pod to kubernetes 07/29/23 17:05:50.716 - STEP: verifying the pod is in kubernetes 07/29/23 17:05:50.734 - STEP: verifying pod creation was observed 07/29/23 17:05:50.745 - Jul 29 17:05:50.745: INFO: Waiting up to 5m0s for pod "pod-submit-remove-5a9f9e99-f8c2-4e2a-8747-2734116ed9d7" in namespace "pods-2220" to be "running" - Jul 29 17:05:50.757: INFO: Pod "pod-submit-remove-5a9f9e99-f8c2-4e2a-8747-2734116ed9d7": Phase="Pending", Reason="", readiness=false. Elapsed: 12.001103ms - Jul 29 17:05:52.765: INFO: Pod "pod-submit-remove-5a9f9e99-f8c2-4e2a-8747-2734116ed9d7": Phase="Running", Reason="", readiness=true. Elapsed: 2.020242795s - Jul 29 17:05:52.765: INFO: Pod "pod-submit-remove-5a9f9e99-f8c2-4e2a-8747-2734116ed9d7" satisfied condition "running" - STEP: deleting the pod gracefully 07/29/23 17:05:52.772 - STEP: verifying pod deletion was observed 07/29/23 17:05:52.785 - [AfterEach] [sig-node] Pods + [BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/scheduling/predicates.go:97 + Aug 24 13:15:15.021: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready + Aug 24 13:15:15.040: INFO: Waiting for terminating namespaces to be deleted... 
+ Aug 24 13:15:15.082: INFO: + Logging pods the apiserver thinks is on node pe9deep4seen-1 before test + Aug 24 13:15:15.104: INFO: cilium-node-init-wqpdx from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.105: INFO: Container node-init ready: true, restart count 0 + Aug 24 13:15:15.105: INFO: cilium-wpzgb from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.105: INFO: Container cilium-agent ready: true, restart count 0 + Aug 24 13:15:15.105: INFO: coredns-787d4945fb-8jnm5 from kube-system started at 2023-08-24 11:24:04 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.105: INFO: Container coredns ready: true, restart count 0 + Aug 24 13:15:15.105: INFO: coredns-787d4945fb-d76z6 from kube-system started at 2023-08-24 11:24:07 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.105: INFO: Container coredns ready: true, restart count 0 + Aug 24 13:15:15.106: INFO: kube-addon-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.106: INFO: Container kube-addon-manager ready: true, restart count 0 + Aug 24 13:15:15.106: INFO: kube-apiserver-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.106: INFO: Container kube-apiserver ready: true, restart count 0 + Aug 24 13:15:15.106: INFO: kube-controller-manager-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.106: INFO: Container kube-controller-manager ready: true, restart count 0 + Aug 24 13:15:15.106: INFO: kube-proxy-nr5bs from kube-system started at 2023-08-24 11:21:24 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.106: INFO: Container kube-proxy ready: true, restart count 0 + Aug 24 13:15:15.106: INFO: kube-scheduler-pe9deep4seen-1 from kube-system started at 2023-08-24 11:25:01 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.106: INFO: Container kube-scheduler ready: true, restart count 0 + Aug 24 13:15:15.107: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-997gw from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) + Aug 24 13:15:15.107: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 13:15:15.107: INFO: Container systemd-logs ready: true, restart count 0 + Aug 24 13:15:15.107: INFO: + Logging pods the apiserver thinks is on node pe9deep4seen-2 before test + Aug 24 13:15:15.124: INFO: cilium-node-init-95cbk from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.124: INFO: Container node-init ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: cilium-operator-75f7897945-8qqz2 from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.124: INFO: Container cilium-operator ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: cilium-rcknz from kube-system started at 2023-08-24 11:22:51 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.124: INFO: Container cilium-agent ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: kube-addon-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:37 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.124: INFO: Container kube-addon-manager ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: kube-apiserver-pe9deep4seen-2 
from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.124: INFO: Container kube-apiserver ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: kube-controller-manager-pe9deep4seen-2 from kube-system started at 2023-08-24 11:22:09 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.124: INFO: Container kube-controller-manager ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: kube-proxy-lm2dm from kube-system started at 2023-08-24 11:22:03 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.124: INFO: Container kube-proxy ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: kube-scheduler-pe9deep4seen-2 from kube-system started at 2023-08-24 11:25:19 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.124: INFO: Container kube-scheduler ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-nxmsl from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) + Aug 24 13:15:15.124: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: Container systemd-logs ready: true, restart count 0 + Aug 24 13:15:15.124: INFO: + Logging pods the apiserver thinks is on node pe9deep4seen-3 before test + Aug 24 13:15:15.139: INFO: test-webserver-49e04467-5347-46a7-92e9-074529e73558 from container-probe-6021 started at 2023-08-24 13:14:13 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.139: INFO: Container test-webserver ready: false, restart count 0 + Aug 24 13:15:15.139: INFO: cilium-node-init-pdcw9 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.140: INFO: Container node-init ready: true, restart count 0 + Aug 24 13:15:15.140: INFO: cilium-xgc44 from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.140: INFO: Container cilium-agent ready: true, restart count 0 + Aug 24 13:15:15.140: INFO: kube-proxy-8vv8d from kube-system started at 2023-08-24 11:26:13 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.140: INFO: Container kube-proxy ready: true, restart count 0 + Aug 24 13:15:15.140: INFO: sonobuoy from sonobuoy started at 2023-08-24 11:38:19 +0000 UTC (1 container statuses recorded) + Aug 24 13:15:15.140: INFO: Container kube-sonobuoy ready: true, restart count 0 + Aug 24 13:15:15.141: INFO: sonobuoy-e2e-job-b3f52dde3e8a4a4e from sonobuoy started at 2023-08-24 11:38:31 +0000 UTC (2 container statuses recorded) + Aug 24 13:15:15.141: INFO: Container e2e ready: true, restart count 0 + Aug 24 13:15:15.141: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 13:15:15.141: INFO: sonobuoy-systemd-logs-daemon-set-872ad85b7a0e4b9a-p6l72 from sonobuoy started at 2023-08-24 11:38:32 +0000 UTC (2 container statuses recorded) + Aug 24 13:15:15.141: INFO: Container sonobuoy-worker ready: true, restart count 0 + Aug 24 13:15:15.142: INFO: Container systemd-logs ready: true, restart count 0 + [It] validates that NodeSelector is respected if matching [Conformance] + test/e2e/scheduling/predicates.go:466 + STEP: Trying to launch a pod without a label to get a node which can launch it. 08/24/23 13:15:15.142 + Aug 24 13:15:15.160: INFO: Waiting up to 1m0s for pod "without-label" in namespace "sched-pred-4812" to be "running" + Aug 24 13:15:15.169: INFO: Pod "without-label": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.858024ms + Aug 24 13:15:17.180: INFO: Pod "without-label": Phase="Running", Reason="", readiness=true. Elapsed: 2.020109214s + Aug 24 13:15:17.181: INFO: Pod "without-label" satisfied condition "running" + STEP: Explicitly delete pod here to free the resource it takes. 08/24/23 13:15:17.19 + STEP: Trying to apply a random label on the found node. 08/24/23 13:15:17.218 + STEP: verifying the node has the label kubernetes.io/e2e-2a7e3586-9103-4828-8200-3c8df6e49f6c 42 08/24/23 13:15:17.252 + STEP: Trying to relaunch the pod, now with labels. 08/24/23 13:15:17.276 + Aug 24 13:15:17.296: INFO: Waiting up to 5m0s for pod "with-labels" in namespace "sched-pred-4812" to be "not pending" + Aug 24 13:15:17.306: INFO: Pod "with-labels": Phase="Pending", Reason="", readiness=false. Elapsed: 10.516011ms + Aug 24 13:15:19.323: INFO: Pod "with-labels": Phase="Running", Reason="", readiness=true. Elapsed: 2.027023759s + Aug 24 13:15:19.323: INFO: Pod "with-labels" satisfied condition "not pending" + STEP: removing the label kubernetes.io/e2e-2a7e3586-9103-4828-8200-3c8df6e49f6c off the node pe9deep4seen-3 08/24/23 13:15:19.338 + STEP: verifying the node doesn't have the label kubernetes.io/e2e-2a7e3586-9103-4828-8200-3c8df6e49f6c 08/24/23 13:15:19.37 + [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 17:05:54.995: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Pods + Aug 24 13:15:19.402: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + test/e2e/scheduling/predicates.go:88 + [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Pods + [DeferCleanup (Each)] [sig-scheduling] SchedulerPredicates [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "pods-2220" for this suite. 07/29/23 17:05:55.009 + STEP: Destroying namespace "sched-pred-4812" for this suite. 
08/24/23 13:15:19.414 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSS +SS ------------------------------ -[sig-apps] Deployment - Deployment should have a working scale subresource [Conformance] - test/e2e/apps/deployment.go:150 -[BeforeEach] [sig-apps] Deployment +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + Should recreate evicted statefulset [Conformance] + test/e2e/apps/statefulset.go:739 +[BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:05:55.023 -Jul 29 17:05:55.023: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename deployment 07/29/23 17:05:55.029 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:55.063 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:55.07 -[BeforeEach] [sig-apps] Deployment +STEP: Creating a kubernetes client 08/24/23 13:15:19.443 +Aug 24 13:15:19.443: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename statefulset 08/24/23 13:15:19.447 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:19.506 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:19.527 +[BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 -[It] Deployment should have a working scale subresource [Conformance] - test/e2e/apps/deployment.go:150 -Jul 29 17:05:55.075: INFO: Creating simple deployment test-new-deployment -Jul 29 17:05:55.097: INFO: deployment "test-new-deployment" doesn't have the required revision set -STEP: getting scale subresource 07/29/23 17:05:57.122 -STEP: updating a scale subresource 07/29/23 17:05:57.126 -STEP: verifying the deployment Spec.Replicas was modified 07/29/23 17:05:57.134 -STEP: Patch a scale subresource 07/29/23 17:05:57.14 -[AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 -Jul 29 17:05:57.163: INFO: Deployment "test-new-deployment": -&Deployment{ObjectMeta:{test-new-deployment deployment-3111 884213e7-de2c-4ce2-8918-979ce707cd79 39650 3 2023-07-29 17:05:55 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 FieldsV1 {"f:spec":{"f:replicas":{}}} scale} {e2e.test Update apps/v1 2023-07-29 17:05:55 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:05:56 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*4,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0043bd858 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-07-29 17:05:56 +0000 UTC,LastTransitionTime:2023-07-29 17:05:56 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-new-deployment-7f5969cbc7" has successfully progressed.,LastUpdateTime:2023-07-29 17:05:56 +0000 UTC,LastTransitionTime:2023-07-29 17:05:55 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - -Jul 29 17:05:57.169: INFO: New ReplicaSet "test-new-deployment-7f5969cbc7" of Deployment "test-new-deployment": -&ReplicaSet{ObjectMeta:{test-new-deployment-7f5969cbc7 deployment-3111 bd4ff8c8-4a14-4850-8293-7239715aca31 39649 2 2023-07-29 17:05:55 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-new-deployment 884213e7-de2c-4ce2-8918-979ce707cd79 0xc0036f7e17 0xc0036f7e18}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:05:56 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-07-29 17:05:57 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"884213e7-de2c-4ce2-8918-979ce707cd79\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7f5969cbc7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0036f7ea8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} -Jul 29 17:05:57.185: INFO: Pod "test-new-deployment-7f5969cbc7-jg29d" is available: -&Pod{ObjectMeta:{test-new-deployment-7f5969cbc7-jg29d test-new-deployment-7f5969cbc7- deployment-3111 5393cd18-ad0f-4287-bab3-f9e2b21358d5 39644 0 2023-07-29 17:05:55 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet test-new-deployment-7f5969cbc7 bd4ff8c8-4a14-4850-8293-7239715aca31 0xc0043bde57 0xc0043bde58}] [] [{kube-controller-manager Update v1 2023-07-29 17:05:55 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"bd4ff8c8-4a14-4850-8293-7239715aca31\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:05:56 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.253\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-hlhtt,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hlhtt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{K
ey:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:55 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:56 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:56 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:55 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.253,StartTime:2023-07-29 17:05:55 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:05:56 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://b99ee3add64526fa5d75f9c488198229f78e1e51d79f889cf703d70a964bdf50,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.253,},},EphemeralContainerStatuses:[]ContainerStatus{},},} -Jul 29 17:05:57.186: INFO: Pod "test-new-deployment-7f5969cbc7-rgfhh" is not available: -&Pod{ObjectMeta:{test-new-deployment-7f5969cbc7-rgfhh test-new-deployment-7f5969cbc7- deployment-3111 a5b30a79-9fa6-41c6-b189-af2400d58588 39653 0 2023-07-29 17:05:57 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet test-new-deployment-7f5969cbc7 bd4ff8c8-4a14-4850-8293-7239715aca31 0xc003f0e047 0xc003f0e048}] [] [{kube-controller-manager Update v1 2023-07-29 17:05:57 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"bd4ff8c8-4a14-4850-8293-7239715aca31\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-wmfck,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wmfck,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},Resou
rceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:57 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} -[AfterEach] [sig-apps] Deployment +[BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 +STEP: Creating service test in namespace statefulset-757 08/24/23 13:15:19.542 +[It] Should recreate evicted statefulset [Conformance] + test/e2e/apps/statefulset.go:739 +STEP: Looking for a node to schedule stateful set and pod 08/24/23 13:15:19.564 +STEP: Creating pod with conflicting port in namespace statefulset-757 08/24/23 13:15:19.582 +STEP: Waiting until pod test-pod will start running in namespace statefulset-757 08/24/23 13:15:19.608 +Aug 24 13:15:19.608: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "statefulset-757" to be "running" +Aug 24 13:15:19.628: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 19.774541ms +Aug 24 13:15:21.635: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.026445687s +Aug 24 13:15:21.635: INFO: Pod "test-pod" satisfied condition "running" +STEP: Creating statefulset with conflicting port in namespace statefulset-757 08/24/23 13:15:21.635 +STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace statefulset-757 08/24/23 13:15:21.643 +Aug 24 13:15:21.670: INFO: Observed stateful pod in namespace: statefulset-757, name: ss-0, uid: 85a0bd14-d783-4049-a40c-26a12a5a1d1d, status phase: Pending. Waiting for statefulset controller to delete. +Aug 24 13:15:21.698: INFO: Observed stateful pod in namespace: statefulset-757, name: ss-0, uid: 85a0bd14-d783-4049-a40c-26a12a5a1d1d, status phase: Failed. Waiting for statefulset controller to delete. +Aug 24 13:15:21.718: INFO: Observed stateful pod in namespace: statefulset-757, name: ss-0, uid: 85a0bd14-d783-4049-a40c-26a12a5a1d1d, status phase: Failed. Waiting for statefulset controller to delete. 
+Aug 24 13:15:21.722: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-757 +STEP: Removing pod with conflicting port in namespace statefulset-757 08/24/23 13:15:21.722 +STEP: Waiting when stateful pod ss-0 will be recreated in namespace statefulset-757 and will be in running state 08/24/23 13:15:21.759 +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 +Aug 24 13:15:35.833: INFO: Deleting all statefulset in ns statefulset-757 +Aug 24 13:15:35.840: INFO: Scaling statefulset ss to 0 +Aug 24 13:15:45.882: INFO: Waiting for statefulset status.replicas updated to 0 +Aug 24 13:15:45.889: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 -Jul 29 17:05:57.186: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Deployment +Aug 24 13:15:45.936: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Deployment +[DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 -STEP: Destroying namespace "deployment-3111" for this suite. 07/29/23 17:05:57.204 +STEP: Destroying namespace "statefulset-757" for this suite. 08/24/23 13:15:45.946 ------------------------------ -• [2.200 seconds] -[sig-apps] Deployment +• [SLOW TEST] [26.518 seconds] +[sig-apps] StatefulSet test/e2e/apps/framework.go:23 - Deployment should have a working scale subresource [Conformance] - test/e2e/apps/deployment.go:150 + Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:103 + Should recreate evicted statefulset [Conformance] + test/e2e/apps/statefulset.go:739 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Deployment + [BeforeEach] [sig-apps] StatefulSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:05:55.023 - Jul 29 17:05:55.023: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename deployment 07/29/23 17:05:55.029 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:55.063 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:55.07 - [BeforeEach] [sig-apps] Deployment + STEP: Creating a kubernetes client 08/24/23 13:15:19.443 + Aug 24 13:15:19.443: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename statefulset 08/24/23 13:15:19.447 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:19.506 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:19.527 + [BeforeEach] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:91 - [It] Deployment should have a working scale subresource [Conformance] - test/e2e/apps/deployment.go:150 - Jul 29 17:05:55.075: INFO: Creating simple deployment test-new-deployment - Jul 29 17:05:55.097: INFO: deployment "test-new-deployment" doesn't have the required revision set - STEP: getting scale subresource 07/29/23 17:05:57.122 - STEP: updating a scale subresource 07/29/23 17:05:57.126 - STEP: verifying the deployment Spec.Replicas was modified 07/29/23 17:05:57.134 - STEP: 
Patch a scale subresource 07/29/23 17:05:57.14 - [AfterEach] [sig-apps] Deployment - test/e2e/apps/deployment.go:84 - Jul 29 17:05:57.163: INFO: Deployment "test-new-deployment": - &Deployment{ObjectMeta:{test-new-deployment deployment-3111 884213e7-de2c-4ce2-8918-979ce707cd79 39650 3 2023-07-29 17:05:55 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 FieldsV1 {"f:spec":{"f:replicas":{}}} scale} {e2e.test Update apps/v1 2023-07-29 17:05:55 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2023-07-29 17:05:56 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*4,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0043bd858 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2023-07-29 17:05:56 +0000 UTC,LastTransitionTime:2023-07-29 17:05:56 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-new-deployment-7f5969cbc7" has successfully progressed.,LastUpdateTime:2023-07-29 17:05:56 +0000 UTC,LastTransitionTime:2023-07-29 17:05:55 +0000 
UTC,},},ReadyReplicas:1,CollisionCount:nil,},} - - Jul 29 17:05:57.169: INFO: New ReplicaSet "test-new-deployment-7f5969cbc7" of Deployment "test-new-deployment": - &ReplicaSet{ObjectMeta:{test-new-deployment-7f5969cbc7 deployment-3111 bd4ff8c8-4a14-4850-8293-7239715aca31 39649 2 2023-07-29 17:05:55 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-new-deployment 884213e7-de2c-4ce2-8918-979ce707cd79 0xc0036f7e17 0xc0036f7e18}] [] [{kube-controller-manager Update apps/v1 2023-07-29 17:05:56 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status} {kube-controller-manager Update apps/v1 2023-07-29 17:05:57 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"884213e7-de2c-4ce2-8918-979ce707cd79\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} }]},Spec:ReplicaSetSpec{Replicas:*2,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 7f5969cbc7,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [] [] []} {[] [] [{httpd registry.k8s.io/e2e-test-images/httpd:2.4.38-4 [] [] [] [] [] {map[] map[] []} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0036f7ea8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] nil [] []}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} - Jul 29 17:05:57.185: INFO: Pod "test-new-deployment-7f5969cbc7-jg29d" is available: - &Pod{ObjectMeta:{test-new-deployment-7f5969cbc7-jg29d test-new-deployment-7f5969cbc7- deployment-3111 5393cd18-ad0f-4287-bab3-f9e2b21358d5 39644 0 2023-07-29 17:05:55 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet test-new-deployment-7f5969cbc7 bd4ff8c8-4a14-4850-8293-7239715aca31 0xc0043bde57 0xc0043bde58}] [] [{kube-controller-manager Update v1 2023-07-29 17:05:55 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"bd4ff8c8-4a14-4850-8293-7239715aca31\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2023-07-29 17:05:56 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"10.233.66.253\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-hlhtt,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hlhtt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeNa
me:wetuj3nuajog-3,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:55 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:56 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:56 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:55 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:192.168.121.141,PodIP:10.233.66.253,StartTime:2023-07-29 17:05:55 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2023-07-29 17:05:56 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,ImageID:registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22,ContainerID:cri-o://b99ee3add64526fa5d75f9c488198229f78e1e51d79f889cf703d70a964bdf50,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:10.233.66.253,},},EphemeralContainerStatuses:[]ContainerStatus{},},} - Jul 29 17:05:57.186: INFO: Pod "test-new-deployment-7f5969cbc7-rgfhh" is not available: - &Pod{ObjectMeta:{test-new-deployment-7f5969cbc7-rgfhh test-new-deployment-7f5969cbc7- deployment-3111 a5b30a79-9fa6-41c6-b189-af2400d58588 39653 0 2023-07-29 17:05:57 +0000 UTC map[name:httpd pod-template-hash:7f5969cbc7] map[] [{apps/v1 ReplicaSet test-new-deployment-7f5969cbc7 bd4ff8c8-4a14-4850-8293-7239715aca31 0xc003f0e047 0xc003f0e048}] [] [{kube-controller-manager Update v1 2023-07-29 17:05:57 +0000 UTC FieldsV1 
{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"bd4ff8c8-4a14-4850-8293-7239715aca31\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-wmfck,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:registry.k8s.io/e2e-test-images/httpd:2.4.38-4,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wmfck,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:wetuj3nuajog-2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},T
oleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,OS:nil,HostUsers:nil,SchedulingGates:[]PodSchedulingGate{},ResourceClaims:[]PodResourceClaim{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2023-07-29 17:05:57 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} - [AfterEach] [sig-apps] Deployment + [BeforeEach] [sig-apps] StatefulSet + test/e2e/apps/statefulset.go:98 + [BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:113 + STEP: Creating service test in namespace statefulset-757 08/24/23 13:15:19.542 + [It] Should recreate evicted statefulset [Conformance] + test/e2e/apps/statefulset.go:739 + STEP: Looking for a node to schedule stateful set and pod 08/24/23 13:15:19.564 + STEP: Creating pod with conflicting port in namespace statefulset-757 08/24/23 13:15:19.582 + STEP: Waiting until pod test-pod will start running in namespace statefulset-757 08/24/23 13:15:19.608 + Aug 24 13:15:19.608: INFO: Waiting up to 5m0s for pod "test-pod" in namespace "statefulset-757" to be "running" + Aug 24 13:15:19.628: INFO: Pod "test-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 19.774541ms + Aug 24 13:15:21.635: INFO: Pod "test-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.026445687s + Aug 24 13:15:21.635: INFO: Pod "test-pod" satisfied condition "running" + STEP: Creating statefulset with conflicting port in namespace statefulset-757 08/24/23 13:15:21.635 + STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace statefulset-757 08/24/23 13:15:21.643 + Aug 24 13:15:21.670: INFO: Observed stateful pod in namespace: statefulset-757, name: ss-0, uid: 85a0bd14-d783-4049-a40c-26a12a5a1d1d, status phase: Pending. Waiting for statefulset controller to delete. + Aug 24 13:15:21.698: INFO: Observed stateful pod in namespace: statefulset-757, name: ss-0, uid: 85a0bd14-d783-4049-a40c-26a12a5a1d1d, status phase: Failed. Waiting for statefulset controller to delete. + Aug 24 13:15:21.718: INFO: Observed stateful pod in namespace: statefulset-757, name: ss-0, uid: 85a0bd14-d783-4049-a40c-26a12a5a1d1d, status phase: Failed. Waiting for statefulset controller to delete. 
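
The records above show the StatefulSet controller in a recreate loop: ss-0 is pinned to the same node as a bare pod that already holds the host port, so each recreation fails until the bare pod is removed. A minimal sketch of that setup, assuming the usual conflicting-hostPort pattern (the port value, node name, and labels are hypothetical and not taken from the captured log; the service name "test" and namespace match the records above):

    # Sketch only: bare pod holding the host port, plus the StatefulSet that
    # keeps failing to bind it. hostPort/nodeName/labels are assumptions.
    apiVersion: v1
    kind: Pod
    metadata:
      name: test-pod
      namespace: statefulset-757
    spec:
      nodeName: kube03                # assumed: the node found in "Looking for a node..."
      containers:
        - name: webserver
          image: registry.k8s.io/e2e-test-images/httpd:2.4.38-4
          ports:
            - containerPort: 21017
              hostPort: 21017         # holds the port so ss-0 cannot bind it
    ---
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: ss
      namespace: statefulset-757
    spec:
      serviceName: test               # the headless service created in BeforeEach
      replicas: 1
      selector:
        matchLabels:
          app: ss
      template:
        metadata:
          labels:
            app: ss
        spec:
          nodeName: kube03            # same node, same hostPort -> ss-0 keeps failing
          containers:
            - name: webserver
              image: registry.k8s.io/e2e-test-images/httpd:2.4.38-4
              ports:
                - containerPort: 21017
                  hostPort: 21017

Once the bare pod is deleted, the controller's next recreation of ss-0 can bind the port and the pod reaches Running, which is what the spec waits for below.
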
+ Aug 24 13:15:21.722: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-757 + STEP: Removing pod with conflicting port in namespace statefulset-757 08/24/23 13:15:21.722 + STEP: Waiting when stateful pod ss-0 will be recreated in namespace statefulset-757 and will be in running state 08/24/23 13:15:21.759 + [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + test/e2e/apps/statefulset.go:124 + Aug 24 13:15:35.833: INFO: Deleting all statefulset in ns statefulset-757 + Aug 24 13:15:35.840: INFO: Scaling statefulset ss to 0 + Aug 24 13:15:45.882: INFO: Waiting for statefulset status.replicas updated to 0 + Aug 24 13:15:45.889: INFO: Deleting statefulset ss + [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 - Jul 29 17:05:57.186: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Deployment + Aug 24 13:15:45.936: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Deployment + [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 - STEP: Destroying namespace "deployment-3111" for this suite. 07/29/23 17:05:57.204 + STEP: Destroying namespace "statefulset-757" for this suite. 08/24/23 13:15:45.946 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSS +SS ------------------------------ -[sig-node] InitContainer [NodeConformance] - should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] - test/e2e/common/node/init_container.go:458 -[BeforeEach] [sig-node] InitContainer [NodeConformance] +[sig-storage] Downward API volume + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:68 +[BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:05:57.227 -Jul 29 17:05:57.227: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename init-container 07/29/23 17:05:57.229 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:57.261 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:57.265 -[BeforeEach] [sig-node] InitContainer [NodeConformance] +STEP: Creating a kubernetes client 08/24/23 13:15:45.965 +Aug 24 13:15:45.965: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 13:15:45.967 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:46.006 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:46.01 +[BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] InitContainer [NodeConformance] - test/e2e/common/node/init_container.go:165 -[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] - test/e2e/common/node/init_container.go:458 -STEP: creating the pod 07/29/23 17:05:57.271 -Jul 29 17:05:57.272: INFO: PodSpec: initContainers in spec.initContainers -[AfterEach] [sig-node] InitContainer [NodeConformance] +[BeforeEach] 
[sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 +[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:68 +STEP: Creating a pod to test downward API volume plugin 08/24/23 13:15:46.013 +Aug 24 13:15:46.030: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70" in namespace "downward-api-4466" to be "Succeeded or Failed" +Aug 24 13:15:46.037: INFO: Pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70": Phase="Pending", Reason="", readiness=false. Elapsed: 6.919654ms +Aug 24 13:15:48.048: INFO: Pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017489176s +Aug 24 13:15:50.047: INFO: Pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016839894s +STEP: Saw pod success 08/24/23 13:15:50.047 +Aug 24 13:15:50.048: INFO: Pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70" satisfied condition "Succeeded or Failed" +Aug 24 13:15:50.054: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70 container client-container: +STEP: delete the pod 08/24/23 13:15:50.086 +Aug 24 13:15:50.120: INFO: Waiting for pod downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70 to disappear +Aug 24 13:15:50.127: INFO: Pod downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70 no longer exists +[AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:02.021: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +Aug 24 13:15:50.127: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +[DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +[DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 -STEP: Destroying namespace "init-container-6366" for this suite. 07/29/23 17:06:02.036 +STEP: Destroying namespace "downward-api-4466" for this suite. 
08/24/23 13:15:50.15 ------------------------------ -• [4.820 seconds] -[sig-node] InitContainer [NodeConformance] -test/e2e/common/node/framework.go:23 - should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] - test/e2e/common/node/init_container.go:458 +• [4.200 seconds] +[sig-storage] Downward API volume +test/e2e/common/storage/framework.go:23 + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:68 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] InitContainer [NodeConformance] + [BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:05:57.227 - Jul 29 17:05:57.227: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename init-container 07/29/23 17:05:57.229 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:05:57.261 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:05:57.265 - [BeforeEach] [sig-node] InitContainer [NodeConformance] + STEP: Creating a kubernetes client 08/24/23 13:15:45.965 + Aug 24 13:15:45.965: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 13:15:45.967 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:46.006 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:46.01 + [BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] InitContainer [NodeConformance] - test/e2e/common/node/init_container.go:165 - [It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] - test/e2e/common/node/init_container.go:458 - STEP: creating the pod 07/29/23 17:05:57.271 - Jul 29 17:05:57.272: INFO: PodSpec: initContainers in spec.initContainers - [AfterEach] [sig-node] InitContainer [NodeConformance] + [BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 + [It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:68 + STEP: Creating a pod to test downward API volume plugin 08/24/23 13:15:46.013 + Aug 24 13:15:46.030: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70" in namespace "downward-api-4466" to be "Succeeded or Failed" + Aug 24 13:15:46.037: INFO: Pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70": Phase="Pending", Reason="", readiness=false. Elapsed: 6.919654ms + Aug 24 13:15:48.048: INFO: Pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017489176s + Aug 24 13:15:50.047: INFO: Pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.016839894s + STEP: Saw pod success 08/24/23 13:15:50.047 + Aug 24 13:15:50.048: INFO: Pod "downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70" satisfied condition "Succeeded or Failed" + Aug 24 13:15:50.054: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70 container client-container: + STEP: delete the pod 08/24/23 13:15:50.086 + Aug 24 13:15:50.120: INFO: Waiting for pod downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70 to disappear + Aug 24 13:15:50.127: INFO: Pod downwardapi-volume-9cf4db35-3e8a-445b-9a82-f63e23f79a70 no longer exists + [AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:02.021: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + Aug 24 13:15:50.127: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + [DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + [DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 - STEP: Destroying namespace "init-container-6366" for this suite. 07/29/23 17:06:02.036 + STEP: Destroying namespace "downward-api-4466" for this suite. 08/24/23 13:15:50.15 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSS ------------------------------ -[sig-auth] Certificates API [Privileged:ClusterAdmin] - should support CSR API operations [Conformance] - test/e2e/auth/certificates.go:200 -[BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] +[sig-apps] ReplicaSet + should list and delete a collection of ReplicaSets [Conformance] + test/e2e/apps/replica_set.go:165 +[BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:02.054 -Jul 29 17:06:02.054: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename certificates 07/29/23 17:06:02.057 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:02.087 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:02.093 -[BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 13:15:50.167 +Aug 24 13:15:50.167: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename replicaset 08/24/23 13:15:50.169 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:50.205 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:50.212 +[BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 -[It] should support CSR API operations [Conformance] - test/e2e/auth/certificates.go:200 -STEP: getting /apis 07/29/23 17:06:03.979 -STEP: getting /apis/certificates.k8s.io 07/29/23 17:06:03.987 -STEP: getting /apis/certificates.k8s.io/v1 07/29/23 17:06:03.989 -STEP: creating 07/29/23 17:06:03.991 -STEP: getting 07/29/23 17:06:04.022 -STEP: listing 07/29/23 17:06:04.027 -STEP: watching 07/29/23 17:06:04.032 -Jul 29 17:06:04.033: INFO: starting watch -STEP: patching 07/29/23 17:06:04.036 -STEP: updating 
07/29/23 17:06:04.045 -Jul 29 17:06:04.056: INFO: waiting for watch events with expected annotations -Jul 29 17:06:04.056: INFO: saw patched and updated annotations -STEP: getting /approval 07/29/23 17:06:04.057 -STEP: patching /approval 07/29/23 17:06:04.064 -STEP: updating /approval 07/29/23 17:06:04.076 -STEP: getting /status 07/29/23 17:06:04.094 -STEP: patching /status 07/29/23 17:06:04.099 -STEP: updating /status 07/29/23 17:06:04.113 -STEP: deleting 07/29/23 17:06:04.126 -STEP: deleting a collection 07/29/23 17:06:04.143 -[AfterEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] +[It] should list and delete a collection of ReplicaSets [Conformance] + test/e2e/apps/replica_set.go:165 +STEP: Create a ReplicaSet 08/24/23 13:15:50.218 +STEP: Verify that the required pods have come up 08/24/23 13:15:50.233 +Aug 24 13:15:50.243: INFO: Pod name sample-pod: Found 0 pods out of 3 +Aug 24 13:15:55.261: INFO: Pod name sample-pod: Found 3 pods out of 3 +STEP: ensuring each pod is running 08/24/23 13:15:55.261 +Aug 24 13:15:55.269: INFO: Replica Status: {Replicas:3 FullyLabeledReplicas:3 ReadyReplicas:3 AvailableReplicas:3 ObservedGeneration:1 Conditions:[]} +STEP: Listing all ReplicaSets 08/24/23 13:15:55.27 +STEP: DeleteCollection of the ReplicaSets 08/24/23 13:15:55.282 +STEP: After DeleteCollection verify that ReplicaSets have been deleted 08/24/23 13:15:55.305 +[AfterEach] [sig-apps] ReplicaSet test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:04.168: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] +Aug 24 13:15:55.314: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 -STEP: Destroying namespace "certificates-7648" for this suite. 07/29/23 17:06:04.18 +STEP: Destroying namespace "replicaset-8834" for this suite. 
08/24/23 13:15:55.339 ------------------------------ -• [2.139 seconds] -[sig-auth] Certificates API [Privileged:ClusterAdmin] -test/e2e/auth/framework.go:23 - should support CSR API operations [Conformance] - test/e2e/auth/certificates.go:200 +• [SLOW TEST] [5.234 seconds] +[sig-apps] ReplicaSet +test/e2e/apps/framework.go:23 + should list and delete a collection of ReplicaSets [Conformance] + test/e2e/apps/replica_set.go:165 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] + [BeforeEach] [sig-apps] ReplicaSet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:02.054 - Jul 29 17:06:02.054: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename certificates 07/29/23 17:06:02.057 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:02.087 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:02.093 - [BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 13:15:50.167 + Aug 24 13:15:50.167: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename replicaset 08/24/23 13:15:50.169 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:50.205 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:50.212 + [BeforeEach] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:31 - [It] should support CSR API operations [Conformance] - test/e2e/auth/certificates.go:200 - STEP: getting /apis 07/29/23 17:06:03.979 - STEP: getting /apis/certificates.k8s.io 07/29/23 17:06:03.987 - STEP: getting /apis/certificates.k8s.io/v1 07/29/23 17:06:03.989 - STEP: creating 07/29/23 17:06:03.991 - STEP: getting 07/29/23 17:06:04.022 - STEP: listing 07/29/23 17:06:04.027 - STEP: watching 07/29/23 17:06:04.032 - Jul 29 17:06:04.033: INFO: starting watch - STEP: patching 07/29/23 17:06:04.036 - STEP: updating 07/29/23 17:06:04.045 - Jul 29 17:06:04.056: INFO: waiting for watch events with expected annotations - Jul 29 17:06:04.056: INFO: saw patched and updated annotations - STEP: getting /approval 07/29/23 17:06:04.057 - STEP: patching /approval 07/29/23 17:06:04.064 - STEP: updating /approval 07/29/23 17:06:04.076 - STEP: getting /status 07/29/23 17:06:04.094 - STEP: patching /status 07/29/23 17:06:04.099 - STEP: updating /status 07/29/23 17:06:04.113 - STEP: deleting 07/29/23 17:06:04.126 - STEP: deleting a collection 07/29/23 17:06:04.143 - [AfterEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] + [It] should list and delete a collection of ReplicaSets [Conformance] + test/e2e/apps/replica_set.go:165 + STEP: Create a ReplicaSet 08/24/23 13:15:50.218 + STEP: Verify that the required pods have come up 08/24/23 13:15:50.233 + Aug 24 13:15:50.243: INFO: Pod name sample-pod: Found 0 pods out of 3 + Aug 24 13:15:55.261: INFO: Pod name sample-pod: Found 3 pods out of 3 + STEP: ensuring each pod is running 08/24/23 13:15:55.261 + Aug 24 13:15:55.269: INFO: Replica Status: {Replicas:3 FullyLabeledReplicas:3 ReadyReplicas:3 AvailableReplicas:3 ObservedGeneration:1 Conditions:[]} + STEP: Listing all ReplicaSets 08/24/23 13:15:55.27 + STEP: DeleteCollection of the ReplicaSets 08/24/23 13:15:55.282 + STEP: After DeleteCollection verify that ReplicaSets have been deleted 08/24/23 13:15:55.305 + [AfterEach] [sig-apps] ReplicaSet 
test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:04.168: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] + Aug 24 13:15:55.314: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] ReplicaSet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-apps] ReplicaSet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-auth] Certificates API [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-apps] ReplicaSet tear down framework | framework.go:193 - STEP: Destroying namespace "certificates-7648" for this suite. 07/29/23 17:06:04.18 + STEP: Destroying namespace "replicaset-8834" for this suite. 08/24/23 13:15:55.339 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSS +SS ------------------------------ -[sig-apps] ReplicationController - should get and update a ReplicationController scale [Conformance] - test/e2e/apps/rc.go:402 -[BeforeEach] [sig-apps] ReplicationController +[sig-storage] ConfigMap + binary data should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:175 +[BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:04.195 -Jul 29 17:06:04.195: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename replication-controller 07/29/23 17:06:04.2 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:04.231 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:04.236 -[BeforeEach] [sig-apps] ReplicationController +STEP: Creating a kubernetes client 08/24/23 13:15:55.401 +Aug 24 13:15:55.401: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 13:15:55.406 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:55.518 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:55.536 +[BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 -[It] should get and update a ReplicationController scale [Conformance] - test/e2e/apps/rc.go:402 -STEP: Creating ReplicationController "e2e-rc-mgf79" 07/29/23 17:06:04.24 -Jul 29 17:06:04.248: INFO: Get Replication Controller "e2e-rc-mgf79" to confirm replicas -Jul 29 17:06:05.256: INFO: Get Replication Controller "e2e-rc-mgf79" to confirm replicas -Jul 29 17:06:05.265: INFO: Found 1 replicas for "e2e-rc-mgf79" replication controller -STEP: Getting scale subresource for ReplicationController "e2e-rc-mgf79" 07/29/23 17:06:05.265 -STEP: Updating a scale subresource 07/29/23 17:06:05.272 -STEP: Verifying replicas where modified for replication controller "e2e-rc-mgf79" 07/29/23 17:06:05.283 -Jul 29 17:06:05.283: INFO: Get Replication Controller "e2e-rc-mgf79" to confirm replicas -Jul 29 17:06:06.287: INFO: Get Replication Controller "e2e-rc-mgf79" to confirm replicas -Jul 29 17:06:06.294: INFO: Found 2 replicas for "e2e-rc-mgf79" replication controller -[AfterEach] [sig-apps] ReplicationController +[It] binary data should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:175 +STEP: Creating configMap with 
name configmap-test-upd-3c762b17-c12b-4937-82a9-ea61c64370f1 08/24/23 13:15:55.568 +STEP: Creating the pod 08/24/23 13:15:55.582 +Aug 24 13:15:55.611: INFO: Waiting up to 5m0s for pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f" in namespace "configmap-6443" to be "running" +Aug 24 13:15:55.619: INFO: Pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f": Phase="Pending", Reason="", readiness=false. Elapsed: 7.788532ms +Aug 24 13:15:57.631: INFO: Pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019443279s +Aug 24 13:15:59.628: INFO: Pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f": Phase="Running", Reason="", readiness=false. Elapsed: 4.016167175s +Aug 24 13:15:59.628: INFO: Pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f" satisfied condition "running" +STEP: Waiting for pod with text data 08/24/23 13:15:59.628 +STEP: Waiting for pod with binary data 08/24/23 13:15:59.64 +[AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:06.294: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] ReplicationController +Aug 24 13:15:59.653: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] ReplicationController +[DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] ReplicationController +[DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "replication-controller-7400" for this suite. 07/29/23 17:06:06.301 +STEP: Destroying namespace "configmap-6443" for this suite. 
08/24/23 13:15:59.665 ------------------------------ -• [2.119 seconds] -[sig-apps] ReplicationController -test/e2e/apps/framework.go:23 - should get and update a ReplicationController scale [Conformance] - test/e2e/apps/rc.go:402 +• [4.279 seconds] +[sig-storage] ConfigMap +test/e2e/common/storage/framework.go:23 + binary data should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:175 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] ReplicationController + [BeforeEach] [sig-storage] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:04.195 - Jul 29 17:06:04.195: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename replication-controller 07/29/23 17:06:04.2 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:04.231 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:04.236 - [BeforeEach] [sig-apps] ReplicationController + STEP: Creating a kubernetes client 08/24/23 13:15:55.401 + Aug 24 13:15:55.401: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 13:15:55.406 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:55.518 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:55.536 + [BeforeEach] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] ReplicationController - test/e2e/apps/rc.go:57 - [It] should get and update a ReplicationController scale [Conformance] - test/e2e/apps/rc.go:402 - STEP: Creating ReplicationController "e2e-rc-mgf79" 07/29/23 17:06:04.24 - Jul 29 17:06:04.248: INFO: Get Replication Controller "e2e-rc-mgf79" to confirm replicas - Jul 29 17:06:05.256: INFO: Get Replication Controller "e2e-rc-mgf79" to confirm replicas - Jul 29 17:06:05.265: INFO: Found 1 replicas for "e2e-rc-mgf79" replication controller - STEP: Getting scale subresource for ReplicationController "e2e-rc-mgf79" 07/29/23 17:06:05.265 - STEP: Updating a scale subresource 07/29/23 17:06:05.272 - STEP: Verifying replicas where modified for replication controller "e2e-rc-mgf79" 07/29/23 17:06:05.283 - Jul 29 17:06:05.283: INFO: Get Replication Controller "e2e-rc-mgf79" to confirm replicas - Jul 29 17:06:06.287: INFO: Get Replication Controller "e2e-rc-mgf79" to confirm replicas - Jul 29 17:06:06.294: INFO: Found 2 replicas for "e2e-rc-mgf79" replication controller - [AfterEach] [sig-apps] ReplicationController + [It] binary data should be reflected in volume [NodeConformance] [Conformance] + test/e2e/common/storage/configmap_volume.go:175 + STEP: Creating configMap with name configmap-test-upd-3c762b17-c12b-4937-82a9-ea61c64370f1 08/24/23 13:15:55.568 + STEP: Creating the pod 08/24/23 13:15:55.582 + Aug 24 13:15:55.611: INFO: Waiting up to 5m0s for pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f" in namespace "configmap-6443" to be "running" + Aug 24 13:15:55.619: INFO: Pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f": Phase="Pending", Reason="", readiness=false. Elapsed: 7.788532ms + Aug 24 13:15:57.631: INFO: Pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019443279s + Aug 24 13:15:59.628: INFO: Pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f": Phase="Running", Reason="", readiness=false. 
Elapsed: 4.016167175s + Aug 24 13:15:59.628: INFO: Pod "pod-configmaps-d4c12069-4452-457a-8629-9fa4b378390f" satisfied condition "running" + STEP: Waiting for pod with text data 08/24/23 13:15:59.628 + STEP: Waiting for pod with binary data 08/24/23 13:15:59.64 + [AfterEach] [sig-storage] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:06.294: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] ReplicationController + Aug 24 13:15:59.653: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] ReplicationController + [DeferCleanup (Each)] [sig-storage] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] ReplicationController + [DeferCleanup (Each)] [sig-storage] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "replication-controller-7400" for this suite. 07/29/23 17:06:06.301 + STEP: Destroying namespace "configmap-6443" for this suite. 08/24/23 13:15:59.665 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSSSSSS ------------------------------ -[sig-auth] ServiceAccounts - should mount an API token into pods [Conformance] - test/e2e/auth/service_accounts.go:78 -[BeforeEach] [sig-auth] ServiceAccounts +[sig-storage] Secrets + should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:47 +[BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:06.317 -Jul 29 17:06:06.317: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename svcaccounts 07/29/23 17:06:06.319 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:06.348 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:06.352 -[BeforeEach] [sig-auth] ServiceAccounts +STEP: Creating a kubernetes client 08/24/23 13:15:59.682 +Aug 24 13:15:59.682: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 13:15:59.684 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:59.707 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:59.712 +[BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 -[It] should mount an API token into pods [Conformance] - test/e2e/auth/service_accounts.go:78 -Jul 29 17:06:06.379: INFO: Waiting up to 5m0s for pod "pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2" in namespace "svcaccounts-7736" to be "running" -Jul 29 17:06:06.384: INFO: Pod "pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2": Phase="Pending", Reason="", readiness=false. Elapsed: 4.713911ms -Jul 29 17:06:08.394: INFO: Pod "pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01498744s -Jul 29 17:06:08.394: INFO: Pod "pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2" satisfied condition "running" -STEP: reading a file in the container 07/29/23 17:06:08.394 -Jul 29 17:06:08.395: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-7736 pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token' -STEP: reading a file in the container 07/29/23 17:06:08.648 -Jul 29 17:06:08.649: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-7736 pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt' -STEP: reading a file in the container 07/29/23 17:06:08.922 -Jul 29 17:06:08.922: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-7736 pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace' -Jul 29 17:06:09.167: INFO: Got root ca configmap in namespace "svcaccounts-7736" -[AfterEach] [sig-auth] ServiceAccounts +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:47 +STEP: Creating secret with name secret-test-7b1ea301-d718-45b9-ae99-739402a6eb17 08/24/23 13:15:59.716 +STEP: Creating a pod to test consume secrets 08/24/23 13:15:59.721 +Aug 24 13:15:59.735: INFO: Waiting up to 5m0s for pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e" in namespace "secrets-1659" to be "Succeeded or Failed" +Aug 24 13:15:59.745: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e": Phase="Pending", Reason="", readiness=false. Elapsed: 10.174606ms +Aug 24 13:16:01.755: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e": Phase="Running", Reason="", readiness=true. Elapsed: 2.020335982s +Aug 24 13:16:03.757: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e": Phase="Running", Reason="", readiness=false. Elapsed: 4.021766486s +Aug 24 13:16:05.760: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.025450802s +STEP: Saw pod success 08/24/23 13:16:05.76 +Aug 24 13:16:05.761: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e" satisfied condition "Succeeded or Failed" +Aug 24 13:16:05.767: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e container secret-volume-test: +STEP: delete the pod 08/24/23 13:16:05.78 +Aug 24 13:16:05.807: INFO: Waiting for pod pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e to disappear +Aug 24 13:16:05.815: INFO: Pod pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e no longer exists +[AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:09.171: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +Aug 24 13:16:05.815: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-auth] ServiceAccounts +[DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 -STEP: Destroying namespace "svcaccounts-7736" for this suite. 07/29/23 17:06:09.178 +STEP: Destroying namespace "secrets-1659" for this suite. 
08/24/23 13:16:05.826 ------------------------------ -• [2.873 seconds] -[sig-auth] ServiceAccounts -test/e2e/auth/framework.go:23 - should mount an API token into pods [Conformance] - test/e2e/auth/service_accounts.go:78 +• [SLOW TEST] [6.165 seconds] +[sig-storage] Secrets +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:47 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-auth] ServiceAccounts + [BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:06.317 - Jul 29 17:06:06.317: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename svcaccounts 07/29/23 17:06:06.319 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:06.348 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:06.352 - [BeforeEach] [sig-auth] ServiceAccounts + STEP: Creating a kubernetes client 08/24/23 13:15:59.682 + Aug 24 13:15:59.682: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 13:15:59.684 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:15:59.707 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:15:59.712 + [BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 - [It] should mount an API token into pods [Conformance] - test/e2e/auth/service_accounts.go:78 - Jul 29 17:06:06.379: INFO: Waiting up to 5m0s for pod "pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2" in namespace "svcaccounts-7736" to be "running" - Jul 29 17:06:06.384: INFO: Pod "pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2": Phase="Pending", Reason="", readiness=false. Elapsed: 4.713911ms - Jul 29 17:06:08.394: INFO: Pod "pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.01498744s - Jul 29 17:06:08.394: INFO: Pod "pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2" satisfied condition "running" - STEP: reading a file in the container 07/29/23 17:06:08.394 - Jul 29 17:06:08.395: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-7736 pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token' - STEP: reading a file in the container 07/29/23 17:06:08.648 - Jul 29 17:06:08.649: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-7736 pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt' - STEP: reading a file in the container 07/29/23 17:06:08.922 - Jul 29 17:06:08.922: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-7736 pod-service-account-534b2128-bf4b-4b3c-8e22-f2bb9f8fa6c2 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace' - Jul 29 17:06:09.167: INFO: Got root ca configmap in namespace "svcaccounts-7736" - [AfterEach] [sig-auth] ServiceAccounts + [It] should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:47 + STEP: Creating secret with name secret-test-7b1ea301-d718-45b9-ae99-739402a6eb17 08/24/23 13:15:59.716 + STEP: Creating a pod to test consume secrets 08/24/23 13:15:59.721 + Aug 24 13:15:59.735: INFO: Waiting up to 5m0s for pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e" in namespace "secrets-1659" to be "Succeeded or Failed" + Aug 24 13:15:59.745: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e": Phase="Pending", Reason="", readiness=false. Elapsed: 10.174606ms + Aug 24 13:16:01.755: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e": Phase="Running", Reason="", readiness=true. Elapsed: 2.020335982s + Aug 24 13:16:03.757: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e": Phase="Running", Reason="", readiness=false. Elapsed: 4.021766486s + Aug 24 13:16:05.760: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.025450802s + STEP: Saw pod success 08/24/23 13:16:05.76 + Aug 24 13:16:05.761: INFO: Pod "pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e" satisfied condition "Succeeded or Failed" + Aug 24 13:16:05.767: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e container secret-volume-test: + STEP: delete the pod 08/24/23 13:16:05.78 + Aug 24 13:16:05.807: INFO: Waiting for pod pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e to disappear + Aug 24 13:16:05.815: INFO: Pod pod-secrets-ad29ccc7-37e7-498d-ad64-3334f179d07e no longer exists + [AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:09.171: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + Aug 24 13:16:05.815: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-auth] ServiceAccounts + [DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "svcaccounts-7736" for this suite. 
07/29/23 17:06:09.178 + STEP: Destroying namespace "secrets-1659" for this suite. 08/24/23 13:16:05.826 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSSSSS ------------------------------ -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] - should include custom resource definition resources in discovery documents [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:198 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[sig-node] Kubelet when scheduling a busybox command in a pod + should print the output to logs [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:52 +[BeforeEach] [sig-node] Kubelet set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:09.192 -Jul 29 17:06:09.192: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 17:06:09.196 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:09.25 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:09.256 -[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 13:16:05.854 +Aug 24 13:16:05.854: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubelet-test 08/24/23 13:16:05.868 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:05.915 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:05.922 +[BeforeEach] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:31 -[It] should include custom resource definition resources in discovery documents [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:198 -STEP: fetching the /apis discovery document 07/29/23 17:06:09.261 -STEP: finding the apiextensions.k8s.io API group in the /apis discovery document 07/29/23 17:06:09.263 -STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis discovery document 07/29/23 17:06:09.263 -STEP: fetching the /apis/apiextensions.k8s.io discovery document 07/29/23 17:06:09.263 -STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis/apiextensions.k8s.io discovery document 07/29/23 17:06:09.265 -STEP: fetching the /apis/apiextensions.k8s.io/v1 discovery document 07/29/23 17:06:09.265 -STEP: finding customresourcedefinitions resources in the /apis/apiextensions.k8s.io/v1 discovery document 07/29/23 17:06:09.267 -[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 +[It] should print the output to logs [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:52 +Aug 24 13:16:05.947: INFO: Waiting up to 5m0s for pod "busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0" in namespace "kubelet-test-5806" to be "running and ready" +Aug 24 13:16:05.954: INFO: Pod "busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0": Phase="Pending", Reason="", readiness=false. Elapsed: 6.827637ms +Aug 24 13:16:05.954: INFO: The phase of Pod busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 13:16:07.969: INFO: Pod "busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.021651617s +Aug 24 13:16:07.969: INFO: The phase of Pod busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0 is Running (Ready = true) +Aug 24 13:16:07.969: INFO: Pod "busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0" satisfied condition "running and ready" +[AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:09.267: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +Aug 24 13:16:07.996: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 -STEP: Destroying namespace "custom-resource-definition-9238" for this suite. 07/29/23 17:06:09.276 +STEP: Destroying namespace "kubelet-test-5806" for this suite. 08/24/23 13:16:08.02 ------------------------------ -• [0.098 seconds] -[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should include custom resource definition resources in discovery documents [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:198 +• [2.186 seconds] +[sig-node] Kubelet +test/e2e/common/node/framework.go:23 + when scheduling a busybox command in a pod + test/e2e/common/node/kubelet.go:44 + should print the output to logs [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:52 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Kubelet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:09.192 - Jul 29 17:06:09.192: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename custom-resource-definition 07/29/23 17:06:09.196 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:09.25 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:09.256 - [BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 13:16:05.854 + Aug 24 13:16:05.854: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubelet-test 08/24/23 13:16:05.868 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:05.915 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:05.922 + [BeforeEach] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:31 - [It] should include custom resource definition resources in discovery documents [Conformance] - test/e2e/apimachinery/custom_resource_definition.go:198 - STEP: fetching the /apis discovery document 07/29/23 17:06:09.261 - STEP: finding the apiextensions.k8s.io API group in the /apis discovery document 07/29/23 17:06:09.263 - STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis discovery document 07/29/23 17:06:09.263 - STEP: fetching the /apis/apiextensions.k8s.io discovery document 07/29/23 
17:06:09.263 - STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis/apiextensions.k8s.io discovery document 07/29/23 17:06:09.265 - STEP: fetching the /apis/apiextensions.k8s.io/v1 discovery document 07/29/23 17:06:09.265 - STEP: finding customresourcedefinitions resources in the /apis/apiextensions.k8s.io/v1 discovery document 07/29/23 17:06:09.267 - [AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 + [It] should print the output to logs [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:52 + Aug 24 13:16:05.947: INFO: Waiting up to 5m0s for pod "busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0" in namespace "kubelet-test-5806" to be "running and ready" + Aug 24 13:16:05.954: INFO: Pod "busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0": Phase="Pending", Reason="", readiness=false. Elapsed: 6.827637ms + Aug 24 13:16:05.954: INFO: The phase of Pod busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 13:16:07.969: INFO: Pod "busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0": Phase="Running", Reason="", readiness=true. Elapsed: 2.021651617s + Aug 24 13:16:07.969: INFO: The phase of Pod busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0 is Running (Ready = true) + Aug 24 13:16:07.969: INFO: Pod "busybox-scheduling-636b1339-bbc9-4466-84e1-5b49287517c0" satisfied condition "running and ready" + [AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:09.267: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + Aug 24 13:16:07.996: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 - STEP: Destroying namespace "custom-resource-definition-9238" for this suite. 07/29/23 17:06:09.276 + STEP: Destroying namespace "kubelet-test-5806" for this suite. 
08/24/23 13:16:08.02 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected secret - should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:88 -[BeforeEach] [sig-storage] Projected secret +[sig-network] DNS + should provide DNS for pods for Subdomain [Conformance] + test/e2e/network/dns.go:290 +[BeforeEach] [sig-network] DNS set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:09.291 -Jul 29 17:06:09.291: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 17:06:09.295 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:09.332 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:09.337 -[BeforeEach] [sig-storage] Projected secret +STEP: Creating a kubernetes client 08/24/23 13:16:08.049 +Aug 24 13:16:08.049: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename dns 08/24/23 13:16:08.052 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:08.112 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:08.126 +[BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:88 -STEP: Creating projection with secret that has name projected-secret-test-map-fb9015ba-6c91-4c08-a6f3-2f138eff0d7d 07/29/23 17:06:09.342 -STEP: Creating a pod to test consume secrets 07/29/23 17:06:09.351 -Jul 29 17:06:09.368: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87" in namespace "projected-2005" to be "Succeeded or Failed" -Jul 29 17:06:09.375: INFO: Pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87": Phase="Pending", Reason="", readiness=false. Elapsed: 6.209188ms -Jul 29 17:06:11.383: INFO: Pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014167112s -Jul 29 17:06:13.383: INFO: Pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01490822s -STEP: Saw pod success 07/29/23 17:06:13.384 -Jul 29 17:06:13.384: INFO: Pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87" satisfied condition "Succeeded or Failed" -Jul 29 17:06:13.392: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87 container projected-secret-volume-test: -STEP: delete the pod 07/29/23 17:06:13.403 -Jul 29 17:06:13.423: INFO: Waiting for pod pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87 to disappear -Jul 29 17:06:13.431: INFO: Pod pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87 no longer exists -[AfterEach] [sig-storage] Projected secret +[It] should provide DNS for pods for Subdomain [Conformance] + test/e2e/network/dns.go:290 +STEP: Creating a test headless service 08/24/23 13:16:08.134 +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local;sleep 1; done + 08/24/23 13:16:08.146 +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local;sleep 1; done + 08/24/23 13:16:08.147 +STEP: creating a pod to probe DNS 08/24/23 13:16:08.147 +STEP: submitting the pod to kubernetes 08/24/23 13:16:08.147 +Aug 24 13:16:08.181: INFO: Waiting up to 15m0s for pod "dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3" in namespace "dns-8260" to be "running" +Aug 24 13:16:08.196: INFO: Pod "dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3": Phase="Pending", Reason="", readiness=false. Elapsed: 14.773592ms +Aug 24 13:16:10.206: INFO: Pod "dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.025253145s +Aug 24 13:16:10.206: INFO: Pod "dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3" satisfied condition "running" +STEP: retrieving the pod 08/24/23 13:16:10.206 +STEP: looking for the results for each expected name from probers 08/24/23 13:16:10.214 +Aug 24 13:16:10.227: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:10.235: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:10.243: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:10.249: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:10.255: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:10.260: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:10.268: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:10.275: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:10.275: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + +Aug 24 13:16:15.290: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:15.297: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod 
dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:15.306: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:15.313: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:15.318: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:15.324: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:15.330: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:15.335: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:15.335: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + +Aug 24 13:16:20.286: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:20.292: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:20.302: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:20.308: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) 
+Aug 24 13:16:20.313: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:20.319: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:20.324: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:20.329: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:20.329: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + +Aug 24 13:16:25.283: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:25.291: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:25.297: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:25.303: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:25.311: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:25.316: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:25.324: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod 
dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:25.331: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:25.331: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + +Aug 24 13:16:30.287: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:30.294: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:30.300: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:30.305: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:30.310: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:30.316: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:30.321: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:30.329: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:30.329: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local 
wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + +Aug 24 13:16:35.286: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:35.295: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:35.302: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:35.309: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:35.317: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:35.324: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:35.332: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:35.340: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:35.340: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + +Aug 24 13:16:40.284: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:40.292: INFO: Unable to read 
wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:40.300: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:40.305: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) +Aug 24 13:16:40.328: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + +Aug 24 13:16:45.370: INFO: DNS probes using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 succeeded + +STEP: deleting the pod 08/24/23 13:16:45.371 +STEP: deleting the test headless service 08/24/23 13:16:45.436 +[AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:13.431: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected secret +Aug 24 13:16:45.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 -STEP: Destroying namespace "projected-2005" for this suite. 07/29/23 17:06:13.438 +STEP: Destroying namespace "dns-8260" for this suite. 
08/24/23 13:16:45.498 ------------------------------ -• [4.156 seconds] -[sig-storage] Projected secret -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:88 +• [SLOW TEST] [37.471 seconds] +[sig-network] DNS +test/e2e/network/common/framework.go:23 + should provide DNS for pods for Subdomain [Conformance] + test/e2e/network/dns.go:290 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected secret + [BeforeEach] [sig-network] DNS set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:09.291 - Jul 29 17:06:09.291: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 17:06:09.295 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:09.332 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:09.337 - [BeforeEach] [sig-storage] Projected secret + STEP: Creating a kubernetes client 08/24/23 13:16:08.049 + Aug 24 13:16:08.049: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename dns 08/24/23 13:16:08.052 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:08.112 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:08.126 + [BeforeEach] [sig-network] DNS test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:88 - STEP: Creating projection with secret that has name projected-secret-test-map-fb9015ba-6c91-4c08-a6f3-2f138eff0d7d 07/29/23 17:06:09.342 - STEP: Creating a pod to test consume secrets 07/29/23 17:06:09.351 - Jul 29 17:06:09.368: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87" in namespace "projected-2005" to be "Succeeded or Failed" - Jul 29 17:06:09.375: INFO: Pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87": Phase="Pending", Reason="", readiness=false. Elapsed: 6.209188ms - Jul 29 17:06:11.383: INFO: Pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014167112s - Jul 29 17:06:13.383: INFO: Pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01490822s - STEP: Saw pod success 07/29/23 17:06:13.384 - Jul 29 17:06:13.384: INFO: Pod "pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87" satisfied condition "Succeeded or Failed" - Jul 29 17:06:13.392: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87 container projected-secret-volume-test: - STEP: delete the pod 07/29/23 17:06:13.403 - Jul 29 17:06:13.423: INFO: Waiting for pod pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87 to disappear - Jul 29 17:06:13.431: INFO: Pod pod-projected-secrets-606ba7af-3c6e-4e59-9712-94ddd3121f87 no longer exists - [AfterEach] [sig-storage] Projected secret + [It] should provide DNS for pods for Subdomain [Conformance] + test/e2e/network/dns.go:290 + STEP: Creating a test headless service 08/24/23 13:16:08.134 + STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local;sleep 1; done + 08/24/23 13:16:08.146 + STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-8260.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local;sleep 1; done + 08/24/23 13:16:08.147 + STEP: creating a pod to probe DNS 08/24/23 13:16:08.147 + STEP: submitting the pod to kubernetes 08/24/23 13:16:08.147 + Aug 24 13:16:08.181: INFO: Waiting up to 15m0s for pod "dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3" in namespace "dns-8260" to be "running" + Aug 24 13:16:08.196: INFO: Pod "dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3": Phase="Pending", Reason="", readiness=false. Elapsed: 14.773592ms + Aug 24 13:16:10.206: INFO: Pod "dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.025253145s + Aug 24 13:16:10.206: INFO: Pod "dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3" satisfied condition "running" + STEP: retrieving the pod 08/24/23 13:16:10.206 + STEP: looking for the results for each expected name from probers 08/24/23 13:16:10.214 + Aug 24 13:16:10.227: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:10.235: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:10.243: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:10.249: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:10.255: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:10.260: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:10.268: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:10.275: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:10.275: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + + Aug 24 13:16:15.290: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:15.297: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod 
dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:15.306: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:15.313: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:15.318: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:15.324: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:15.330: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:15.335: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:15.335: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + + Aug 24 13:16:20.286: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:20.292: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:20.302: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:20.308: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods 
dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:20.313: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:20.319: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:20.324: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:20.329: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:20.329: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + + Aug 24 13:16:25.283: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:25.291: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:25.297: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:25.303: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:25.311: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:25.316: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:25.324: INFO: Unable to read 
jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:25.331: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:25.331: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + + Aug 24 13:16:30.287: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:30.294: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:30.300: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:30.305: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:30.310: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:30.316: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:30.321: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:30.329: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:30.329: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local 
wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + + Aug 24 13:16:35.286: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:35.295: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:35.302: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:35.309: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:35.317: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:35.324: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:35.332: INFO: Unable to read jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:35.340: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:35.340: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local jessie_udp@dns-test-service-2.dns-8260.svc.cluster.local jessie_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + + Aug 24 13:16:40.284: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods 
dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:40.292: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:40.300: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:40.305: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local from pod dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3: the server could not find the requested resource (get pods dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3) + Aug 24 13:16:40.328: INFO: Lookups using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-8260.svc.cluster.local wheezy_udp@dns-test-service-2.dns-8260.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-8260.svc.cluster.local] + + Aug 24 13:16:45.370: INFO: DNS probes using dns-8260/dns-test-c9bfce13-0dfb-49fc-9d60-0285ab4ca5e3 succeeded + + STEP: deleting the pod 08/24/23 13:16:45.371 + STEP: deleting the test headless service 08/24/23 13:16:45.436 + [AfterEach] [sig-network] DNS test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:13.431: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected secret + Aug 24 13:16:45.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] DNS test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected secret + [DeferCleanup (Each)] [sig-network] DNS dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected secret + [DeferCleanup (Each)] [sig-network] DNS tear down framework | framework.go:193 - STEP: Destroying namespace "projected-2005" for this suite. 07/29/23 17:06:13.438 + STEP: Destroying namespace "dns-8260" for this suite. 
08/24/23 13:16:45.498 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] EmptyDir volumes - should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:117 -[BeforeEach] [sig-storage] EmptyDir volumes +[sig-apps] Daemon set [Serial] + should retry creating failed daemon pods [Conformance] + test/e2e/apps/daemon_set.go:305 +[BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:13.453 -Jul 29 17:06:13.453: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 17:06:13.455 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:13.483 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:13.488 -[BeforeEach] [sig-storage] EmptyDir volumes +STEP: Creating a kubernetes client 08/24/23 13:16:45.538 +Aug 24 13:16:45.538: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename daemonsets 08/24/23 13:16:45.54 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:45.564 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:45.57 +[BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 -[It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:117 -STEP: Creating a pod to test emptydir 0777 on tmpfs 07/29/23 17:06:13.492 -Jul 29 17:06:13.504: INFO: Waiting up to 5m0s for pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c" in namespace "emptydir-466" to be "Succeeded or Failed" -Jul 29 17:06:13.509: INFO: Pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.54321ms -Jul 29 17:06:15.519: INFO: Pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0151726s -Jul 29 17:06:17.519: INFO: Pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.014921046s -STEP: Saw pod success 07/29/23 17:06:17.519 -Jul 29 17:06:17.520: INFO: Pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c" satisfied condition "Succeeded or Failed" -Jul 29 17:06:17.528: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-c7d8c2dc-32c1-492f-b724-3de937be454c container test-container: -STEP: delete the pod 07/29/23 17:06:17.54 -Jul 29 17:06:17.556: INFO: Waiting for pod pod-c7d8c2dc-32c1-492f-b724-3de937be454c to disappear -Jul 29 17:06:17.561: INFO: Pod pod-c7d8c2dc-32c1-492f-b724-3de937be454c no longer exists -[AfterEach] [sig-storage] EmptyDir volumes +[BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 +[It] should retry creating failed daemon pods [Conformance] + test/e2e/apps/daemon_set.go:305 +STEP: Creating a simple DaemonSet "daemon-set" 08/24/23 13:16:45.616 +STEP: Check that daemon pods launch on every node of the cluster. 
08/24/23 13:16:45.628 +Aug 24 13:16:45.647: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 13:16:45.647: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 13:16:46.668: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 13:16:46.668: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 13:16:47.664: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 +Aug 24 13:16:47.664: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 13:16:48.665: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 +Aug 24 13:16:48.665: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set +STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. 08/24/23 13:16:48.67 +Aug 24 13:16:48.718: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 +Aug 24 13:16:48.718: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 13:16:49.738: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 +Aug 24 13:16:49.738: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 +Aug 24 13:16:50.742: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 +Aug 24 13:16:50.743: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set +STEP: Wait for the failed daemon pod to be completely deleted. 08/24/23 13:16:50.743 +[AfterEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:122 +STEP: Deleting DaemonSet "daemon-set" 08/24/23 13:16:50.758 +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-342, will wait for the garbage collector to delete the pods 08/24/23 13:16:50.758 +Aug 24 13:16:50.849: INFO: Deleting DaemonSet.extensions daemon-set took: 31.278103ms +Aug 24 13:16:50.950: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.953585ms +Aug 24 13:16:53.157: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 +Aug 24 13:16:53.157: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set +Aug 24 13:16:53.164: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"38182"},"items":null} + +Aug 24 13:16:53.170: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"38182"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:17.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +Aug 24 13:16:53.202: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes +[DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-466" for this suite. 07/29/23 17:06:17.569 +STEP: Destroying namespace "daemonsets-342" for this suite. 
08/24/23 13:16:53.214 ------------------------------ -• [4.128 seconds] -[sig-storage] EmptyDir volumes -test/e2e/common/storage/framework.go:23 - should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:117 +• [SLOW TEST] [7.701 seconds] +[sig-apps] Daemon set [Serial] +test/e2e/apps/framework.go:23 + should retry creating failed daemon pods [Conformance] + test/e2e/apps/daemon_set.go:305 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-apps] Daemon set [Serial] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:13.453 - Jul 29 17:06:13.453: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 17:06:13.455 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:13.483 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:13.488 - [BeforeEach] [sig-storage] EmptyDir volumes + STEP: Creating a kubernetes client 08/24/23 13:16:45.538 + Aug 24 13:16:45.538: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename daemonsets 08/24/23 13:16:45.54 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:45.564 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:45.57 + [BeforeEach] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:31 - [It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:117 - STEP: Creating a pod to test emptydir 0777 on tmpfs 07/29/23 17:06:13.492 - Jul 29 17:06:13.504: INFO: Waiting up to 5m0s for pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c" in namespace "emptydir-466" to be "Succeeded or Failed" - Jul 29 17:06:13.509: INFO: Pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.54321ms - Jul 29 17:06:15.519: INFO: Pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0151726s - Jul 29 17:06:17.519: INFO: Pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.014921046s - STEP: Saw pod success 07/29/23 17:06:17.519 - Jul 29 17:06:17.520: INFO: Pod "pod-c7d8c2dc-32c1-492f-b724-3de937be454c" satisfied condition "Succeeded or Failed" - Jul 29 17:06:17.528: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-c7d8c2dc-32c1-492f-b724-3de937be454c container test-container: - STEP: delete the pod 07/29/23 17:06:17.54 - Jul 29 17:06:17.556: INFO: Waiting for pod pod-c7d8c2dc-32c1-492f-b724-3de937be454c to disappear - Jul 29 17:06:17.561: INFO: Pod pod-c7d8c2dc-32c1-492f-b724-3de937be454c no longer exists - [AfterEach] [sig-storage] EmptyDir volumes + [BeforeEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:157 + [It] should retry creating failed daemon pods [Conformance] + test/e2e/apps/daemon_set.go:305 + STEP: Creating a simple DaemonSet "daemon-set" 08/24/23 13:16:45.616 + STEP: Check that daemon pods launch on every node of the cluster. 
08/24/23 13:16:45.628 + Aug 24 13:16:45.647: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 13:16:45.647: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 13:16:46.668: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 13:16:46.668: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 13:16:47.664: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 + Aug 24 13:16:47.664: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 13:16:48.665: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 + Aug 24 13:16:48.665: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set + STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. 08/24/23 13:16:48.67 + Aug 24 13:16:48.718: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 + Aug 24 13:16:48.718: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 13:16:49.738: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 2 + Aug 24 13:16:49.738: INFO: Node pe9deep4seen-1 is running 0 daemon pod, expected 1 + Aug 24 13:16:50.742: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 3 + Aug 24 13:16:50.743: INFO: Number of running nodes: 3, number of available pods: 3 in daemonset daemon-set + STEP: Wait for the failed daemon pod to be completely deleted. 08/24/23 13:16:50.743 + [AfterEach] [sig-apps] Daemon set [Serial] + test/e2e/apps/daemon_set.go:122 + STEP: Deleting DaemonSet "daemon-set" 08/24/23 13:16:50.758 + STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-342, will wait for the garbage collector to delete the pods 08/24/23 13:16:50.758 + Aug 24 13:16:50.849: INFO: Deleting DaemonSet.extensions daemon-set took: 31.278103ms + Aug 24 13:16:50.950: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.953585ms + Aug 24 13:16:53.157: INFO: Number of nodes with available pods controlled by daemonset daemon-set: 0 + Aug 24 13:16:53.157: INFO: Number of running nodes: 0, number of available pods: 0 in daemonset daemon-set + Aug 24 13:16:53.164: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"38182"},"items":null} + + Aug 24 13:16:53.170: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"38182"},"items":null} + + [AfterEach] [sig-apps] Daemon set [Serial] test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:17.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + Aug 24 13:16:53.202: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes + [DeferCleanup (Each)] [sig-apps] Daemon set [Serial] tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-466" for this suite. 07/29/23 17:06:17.569 + STEP: Destroying namespace "daemonsets-342" for this suite. 
08/24/23 13:16:53.214 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:249 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-node] ConfigMap + should run through a ConfigMap lifecycle [Conformance] + test/e2e/common/node/configmap.go:169 +[BeforeEach] [sig-node] ConfigMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:17.589 -Jul 29 17:06:17.589: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 17:06:17.591 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:17.62 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:17.624 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 13:16:53.251 +Aug 24 13:16:53.252: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename configmap 08/24/23 13:16:53.257 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:53.288 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:53.295 +[BeforeEach] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:249 -STEP: Creating a pod to test downward API volume plugin 07/29/23 17:06:17.63 -Jul 29 17:06:17.642: INFO: Waiting up to 5m0s for pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f" in namespace "projected-8138" to be "Succeeded or Failed" -Jul 29 17:06:17.653: INFO: Pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f": Phase="Pending", Reason="", readiness=false. Elapsed: 11.360254ms -Jul 29 17:06:19.660: INFO: Pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018503568s -Jul 29 17:06:21.662: INFO: Pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.020536788s -STEP: Saw pod success 07/29/23 17:06:21.663 -Jul 29 17:06:21.663: INFO: Pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f" satisfied condition "Succeeded or Failed" -Jul 29 17:06:21.669: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f container client-container: -STEP: delete the pod 07/29/23 17:06:21.68 -Jul 29 17:06:21.695: INFO: Waiting for pod downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f to disappear -Jul 29 17:06:21.700: INFO: Pod downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f no longer exists -[AfterEach] [sig-storage] Projected downwardAPI +[It] should run through a ConfigMap lifecycle [Conformance] + test/e2e/common/node/configmap.go:169 +STEP: creating a ConfigMap 08/24/23 13:16:53.304 +STEP: fetching the ConfigMap 08/24/23 13:16:53.315 +STEP: patching the ConfigMap 08/24/23 13:16:53.321 +STEP: listing all ConfigMaps in all namespaces with a label selector 08/24/23 13:16:53.332 +STEP: deleting the ConfigMap by collection with a label selector 08/24/23 13:16:53.341 +STEP: listing all ConfigMaps in test namespace 08/24/23 13:16:53.353 +[AfterEach] [sig-node] ConfigMap test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:21.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 13:16:53.359: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-node] ConfigMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-node] ConfigMap tear down framework | framework.go:193 -STEP: Destroying namespace "projected-8138" for this suite. 07/29/23 17:06:21.709 +STEP: Destroying namespace "configmap-4792" for this suite. 
08/24/23 13:16:53.367 ------------------------------ -• [4.132 seconds] -[sig-storage] Projected downwardAPI -test/e2e/common/storage/framework.go:23 - should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:249 +• [0.129 seconds] +[sig-node] ConfigMap +test/e2e/common/node/framework.go:23 + should run through a ConfigMap lifecycle [Conformance] + test/e2e/common/node/configmap.go:169 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-node] ConfigMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:17.589 - Jul 29 17:06:17.589: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 17:06:17.591 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:17.62 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:17.624 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 13:16:53.251 + Aug 24 13:16:53.252: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename configmap 08/24/23 13:16:53.257 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:53.288 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:53.295 + [BeforeEach] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:249 - STEP: Creating a pod to test downward API volume plugin 07/29/23 17:06:17.63 - Jul 29 17:06:17.642: INFO: Waiting up to 5m0s for pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f" in namespace "projected-8138" to be "Succeeded or Failed" - Jul 29 17:06:17.653: INFO: Pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f": Phase="Pending", Reason="", readiness=false. Elapsed: 11.360254ms - Jul 29 17:06:19.660: INFO: Pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018503568s - Jul 29 17:06:21.662: INFO: Pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.020536788s - STEP: Saw pod success 07/29/23 17:06:21.663 - Jul 29 17:06:21.663: INFO: Pod "downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f" satisfied condition "Succeeded or Failed" - Jul 29 17:06:21.669: INFO: Trying to get logs from node wetuj3nuajog-3 pod downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f container client-container: - STEP: delete the pod 07/29/23 17:06:21.68 - Jul 29 17:06:21.695: INFO: Waiting for pod downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f to disappear - Jul 29 17:06:21.700: INFO: Pod downwardapi-volume-5ef78a3e-f43a-4126-bc30-1a5a7a944a2f no longer exists - [AfterEach] [sig-storage] Projected downwardAPI + [It] should run through a ConfigMap lifecycle [Conformance] + test/e2e/common/node/configmap.go:169 + STEP: creating a ConfigMap 08/24/23 13:16:53.304 + STEP: fetching the ConfigMap 08/24/23 13:16:53.315 + STEP: patching the ConfigMap 08/24/23 13:16:53.321 + STEP: listing all ConfigMaps in all namespaces with a label selector 08/24/23 13:16:53.332 + STEP: deleting the ConfigMap by collection with a label selector 08/24/23 13:16:53.341 + STEP: listing all ConfigMaps in test namespace 08/24/23 13:16:53.353 + [AfterEach] [sig-node] ConfigMap test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:21.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 13:16:53.359: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] ConfigMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-node] ConfigMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-node] ConfigMap tear down framework | framework.go:193 - STEP: Destroying namespace "projected-8138" for this suite. 07/29/23 17:06:21.709 + STEP: Destroying namespace "configmap-4792" for this suite. 
08/24/23 13:16:53.367 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSSSSSSS ------------------------------ -[sig-node] PodTemplates - should run the lifecycle of PodTemplates [Conformance] - test/e2e/common/node/podtemplates.go:53 -[BeforeEach] [sig-node] PodTemplates +[sig-storage] EmptyDir volumes + volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:87 +[BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:21.721 -Jul 29 17:06:21.721: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename podtemplate 07/29/23 17:06:21.723 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:21.75 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:21.754 -[BeforeEach] [sig-node] PodTemplates +STEP: Creating a kubernetes client 08/24/23 13:16:53.385 +Aug 24 13:16:53.386: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename emptydir 08/24/23 13:16:53.388 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:53.415 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:53.42 +[BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 -[It] should run the lifecycle of PodTemplates [Conformance] - test/e2e/common/node/podtemplates.go:53 -[AfterEach] [sig-node] PodTemplates +[It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:87 +STEP: Creating a pod to test emptydir volume type on tmpfs 08/24/23 13:16:53.424 +Aug 24 13:16:53.438: INFO: Waiting up to 5m0s for pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85" in namespace "emptydir-2739" to be "Succeeded or Failed" +Aug 24 13:16:53.453: INFO: Pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85": Phase="Pending", Reason="", readiness=false. Elapsed: 14.933484ms +Aug 24 13:16:55.462: INFO: Pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023401244s +Aug 24 13:16:57.461: INFO: Pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02228143s +STEP: Saw pod success 08/24/23 13:16:57.461 +Aug 24 13:16:57.462: INFO: Pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85" satisfied condition "Succeeded or Failed" +Aug 24 13:16:57.468: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-f43dc148-d23a-48f7-809a-19df98f2be85 container test-container: +STEP: delete the pod 08/24/23 13:16:57.488 +Aug 24 13:16:57.509: INFO: Waiting for pod pod-f43dc148-d23a-48f7-809a-19df98f2be85 to disappear +Aug 24 13:16:57.515: INFO: Pod pod-f43dc148-d23a-48f7-809a-19df98f2be85 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:21.810: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] PodTemplates +Aug 24 13:16:57.515: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] PodTemplates +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] PodTemplates +[DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 -STEP: Destroying namespace "podtemplate-1982" for this suite. 07/29/23 17:06:21.821 +STEP: Destroying namespace "emptydir-2739" for this suite. 08/24/23 13:16:57.525 ------------------------------ -• [0.110 seconds] -[sig-node] PodTemplates -test/e2e/common/node/framework.go:23 - should run the lifecycle of PodTemplates [Conformance] - test/e2e/common/node/podtemplates.go:53 +• [4.158 seconds] +[sig-storage] EmptyDir volumes +test/e2e/common/storage/framework.go:23 + volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:87 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] PodTemplates + [BeforeEach] [sig-storage] EmptyDir volumes set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:21.721 - Jul 29 17:06:21.721: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename podtemplate 07/29/23 17:06:21.723 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:21.75 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:21.754 - [BeforeEach] [sig-node] PodTemplates + STEP: Creating a kubernetes client 08/24/23 13:16:53.385 + Aug 24 13:16:53.386: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename emptydir 08/24/23 13:16:53.388 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:53.415 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:53.42 + [BeforeEach] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:31 - [It] should run the lifecycle of PodTemplates [Conformance] - test/e2e/common/node/podtemplates.go:53 - [AfterEach] [sig-node] PodTemplates + [It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/storage/empty_dir.go:87 + STEP: Creating a pod to test emptydir volume type on tmpfs 08/24/23 13:16:53.424 + Aug 24 13:16:53.438: INFO: Waiting up to 5m0s for pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85" in namespace "emptydir-2739" to be "Succeeded or Failed" + Aug 24 13:16:53.453: INFO: Pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85": Phase="Pending", Reason="", 
readiness=false. Elapsed: 14.933484ms + Aug 24 13:16:55.462: INFO: Pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023401244s + Aug 24 13:16:57.461: INFO: Pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02228143s + STEP: Saw pod success 08/24/23 13:16:57.461 + Aug 24 13:16:57.462: INFO: Pod "pod-f43dc148-d23a-48f7-809a-19df98f2be85" satisfied condition "Succeeded or Failed" + Aug 24 13:16:57.468: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-f43dc148-d23a-48f7-809a-19df98f2be85 container test-container: + STEP: delete the pod 08/24/23 13:16:57.488 + Aug 24 13:16:57.509: INFO: Waiting for pod pod-f43dc148-d23a-48f7-809a-19df98f2be85 to disappear + Aug 24 13:16:57.515: INFO: Pod pod-f43dc148-d23a-48f7-809a-19df98f2be85 no longer exists + [AfterEach] [sig-storage] EmptyDir volumes test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:21.810: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] PodTemplates + Aug 24 13:16:57.515: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] PodTemplates + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] PodTemplates + [DeferCleanup (Each)] [sig-storage] EmptyDir volumes tear down framework | framework.go:193 - STEP: Destroying namespace "podtemplate-1982" for this suite. 07/29/23 17:06:21.821 + STEP: Destroying namespace "emptydir-2739" for this suite. 08/24/23 13:16:57.525 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - listing validating webhooks should work [Conformance] - test/e2e/apimachinery/webhook.go:582 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-network] Services + should be able to create a functioning NodePort service [Conformance] + test/e2e/network/service.go:1302 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:21.837 -Jul 29 17:06:21.837: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 17:06:21.839 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:21.87 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:21.875 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 17:06:21.899 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 17:06:22.751 -STEP: Deploying the webhook pod 07/29/23 17:06:22.766 -STEP: Wait for the deployment to be ready 07/29/23 17:06:22.785 -Jul 29 17:06:22.796: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created -STEP: Deploying the webhook service 07/29/23 17:06:24.84 -STEP: Verifying the service has paired with the endpoint 07/29/23 17:06:24.871 -Jul 29 17:06:25.872: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] listing 
validating webhooks should work [Conformance] - test/e2e/apimachinery/webhook.go:582 -STEP: Listing all of the created validation webhooks 07/29/23 17:06:25.975 -STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 17:06:26.045 -STEP: Deleting the collection of validation webhooks 07/29/23 17:06:26.095 -STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 17:06:26.256 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 13:16:57.554 +Aug 24 13:16:57.554: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 13:16:57.556 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:57.591 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:57.6 +[BeforeEach] [sig-network] Services + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should be able to create a functioning NodePort service [Conformance] + test/e2e/network/service.go:1302 +STEP: creating service nodeport-test with type=NodePort in namespace services-1631 08/24/23 13:16:57.605 +STEP: creating replication controller nodeport-test in namespace services-1631 08/24/23 13:16:57.633 +I0824 13:16:57.648064 14 runners.go:193] Created replication controller with name: nodeport-test, namespace: services-1631, replica count: 2 +I0824 13:17:00.700849 14 runners.go:193] nodeport-test Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Aug 24 13:17:00.701: INFO: Creating new exec pod +Aug 24 13:17:00.717: INFO: Waiting up to 5m0s for pod "execpodfjzdq" in namespace "services-1631" to be "running" +Aug 24 13:17:00.724: INFO: Pod "execpodfjzdq": Phase="Pending", Reason="", readiness=false. Elapsed: 7.011757ms +Aug 24 13:17:02.731: INFO: Pod "execpodfjzdq": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.014203314s +Aug 24 13:17:02.731: INFO: Pod "execpodfjzdq" satisfied condition "running" +Aug 24 13:17:03.746: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1631 exec execpodfjzdq -- /bin/sh -x -c nc -v -z -w 2 nodeport-test 80' +Aug 24 13:17:04.076: INFO: stderr: "+ nc -v -z -w 2 nodeport-test 80\nConnection to nodeport-test 80 port [tcp/http] succeeded!\n" +Aug 24 13:17:04.076: INFO: stdout: "" +Aug 24 13:17:04.077: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1631 exec execpodfjzdq -- /bin/sh -x -c nc -v -z -w 2 10.233.63.220 80' +Aug 24 13:17:04.319: INFO: stderr: "+ nc -v -z -w 2 10.233.63.220 80\nConnection to 10.233.63.220 80 port [tcp/http] succeeded!\n" +Aug 24 13:17:04.319: INFO: stdout: "" +Aug 24 13:17:04.320: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1631 exec execpodfjzdq -- /bin/sh -x -c nc -v -z -w 2 192.168.121.130 30281' +Aug 24 13:17:04.594: INFO: stderr: "+ nc -v -z -w 2 192.168.121.130 30281\nConnection to 192.168.121.130 30281 port [tcp/*] succeeded!\n" +Aug 24 13:17:04.594: INFO: stdout: "" +Aug 24 13:17:04.595: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1631 exec execpodfjzdq -- /bin/sh -x -c nc -v -z -w 2 192.168.121.111 30281' +Aug 24 13:17:04.900: INFO: stderr: "+ nc -v -z -w 2 192.168.121.111 30281\nConnection to 192.168.121.111 30281 port [tcp/*] succeeded!\n" +Aug 24 13:17:04.900: INFO: stdout: "" +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:26.283: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 13:17:04.901: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-8843" for this suite. 07/29/23 17:06:26.385 -STEP: Destroying namespace "webhook-8843-markers" for this suite. 07/29/23 17:06:26.405 +STEP: Destroying namespace "services-1631" for this suite. 
08/24/23 13:17:04.909 ------------------------------ -• [4.596 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - listing validating webhooks should work [Conformance] - test/e2e/apimachinery/webhook.go:582 +• [SLOW TEST] [7.368 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should be able to create a functioning NodePort service [Conformance] + test/e2e/network/service.go:1302 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:21.837 - Jul 29 17:06:21.837: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 17:06:21.839 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:21.87 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:21.875 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 13:16:57.554 + Aug 24 13:16:57.554: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 13:16:57.556 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:16:57.591 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:16:57.6 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 17:06:21.899 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 17:06:22.751 - STEP: Deploying the webhook pod 07/29/23 17:06:22.766 - STEP: Wait for the deployment to be ready 07/29/23 17:06:22.785 - Jul 29 17:06:22.796: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created - STEP: Deploying the webhook service 07/29/23 17:06:24.84 - STEP: Verifying the service has paired with the endpoint 07/29/23 17:06:24.871 - Jul 29 17:06:25.872: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] listing validating webhooks should work [Conformance] - test/e2e/apimachinery/webhook.go:582 - STEP: Listing all of the created validation webhooks 07/29/23 17:06:25.975 - STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 17:06:26.045 - STEP: Deleting the collection of validation webhooks 07/29/23 17:06:26.095 - STEP: Creating a configMap that does not comply to the validation webhook rules 07/29/23 17:06:26.256 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should be able to create a functioning NodePort service [Conformance] + test/e2e/network/service.go:1302 + STEP: creating service nodeport-test with type=NodePort in namespace services-1631 08/24/23 13:16:57.605 + STEP: creating replication controller nodeport-test in namespace services-1631 08/24/23 13:16:57.633 + I0824 13:16:57.648064 14 runners.go:193] Created replication controller with name: nodeport-test, namespace: services-1631, replica count: 2 + I0824 13:17:00.700849 14 runners.go:193] nodeport-test Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 
inactive, 0 terminating, 0 unknown, 0 runningButNotReady + Aug 24 13:17:00.701: INFO: Creating new exec pod + Aug 24 13:17:00.717: INFO: Waiting up to 5m0s for pod "execpodfjzdq" in namespace "services-1631" to be "running" + Aug 24 13:17:00.724: INFO: Pod "execpodfjzdq": Phase="Pending", Reason="", readiness=false. Elapsed: 7.011757ms + Aug 24 13:17:02.731: INFO: Pod "execpodfjzdq": Phase="Running", Reason="", readiness=true. Elapsed: 2.014203314s + Aug 24 13:17:02.731: INFO: Pod "execpodfjzdq" satisfied condition "running" + Aug 24 13:17:03.746: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1631 exec execpodfjzdq -- /bin/sh -x -c nc -v -z -w 2 nodeport-test 80' + Aug 24 13:17:04.076: INFO: stderr: "+ nc -v -z -w 2 nodeport-test 80\nConnection to nodeport-test 80 port [tcp/http] succeeded!\n" + Aug 24 13:17:04.076: INFO: stdout: "" + Aug 24 13:17:04.077: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1631 exec execpodfjzdq -- /bin/sh -x -c nc -v -z -w 2 10.233.63.220 80' + Aug 24 13:17:04.319: INFO: stderr: "+ nc -v -z -w 2 10.233.63.220 80\nConnection to 10.233.63.220 80 port [tcp/http] succeeded!\n" + Aug 24 13:17:04.319: INFO: stdout: "" + Aug 24 13:17:04.320: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1631 exec execpodfjzdq -- /bin/sh -x -c nc -v -z -w 2 192.168.121.130 30281' + Aug 24 13:17:04.594: INFO: stderr: "+ nc -v -z -w 2 192.168.121.130 30281\nConnection to 192.168.121.130 30281 port [tcp/*] succeeded!\n" + Aug 24 13:17:04.594: INFO: stdout: "" + Aug 24 13:17:04.595: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-2729572383 --namespace=services-1631 exec execpodfjzdq -- /bin/sh -x -c nc -v -z -w 2 192.168.121.111 30281' + Aug 24 13:17:04.900: INFO: stderr: "+ nc -v -z -w 2 192.168.121.111 30281\nConnection to 192.168.121.111 30281 port [tcp/*] succeeded!\n" + Aug 24 13:17:04.900: INFO: stdout: "" + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:26.283: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 13:17:04.901: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-8843" for this suite. 07/29/23 17:06:26.385 - STEP: Destroying namespace "webhook-8843-markers" for this suite. 07/29/23 17:06:26.405 + STEP: Destroying namespace "services-1631" for this suite. 
08/24/23 13:17:04.909 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSS ------------------------------ -[sig-node] Container Runtime blackbox test on terminated container - should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:195 -[BeforeEach] [sig-node] Container Runtime +[sig-node] InitContainer [NodeConformance] + should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] + test/e2e/common/node/init_container.go:458 +[BeforeEach] [sig-node] InitContainer [NodeConformance] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:26.449 -Jul 29 17:06:26.449: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename container-runtime 07/29/23 17:06:26.451 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:26.483 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:26.49 -[BeforeEach] [sig-node] Container Runtime +STEP: Creating a kubernetes client 08/24/23 13:17:04.923 +Aug 24 13:17:04.923: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename init-container 08/24/23 13:17:04.925 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:04.958 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:04.962 +[BeforeEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:31 -[It] should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:195 -STEP: create the container 07/29/23 17:06:26.496 -STEP: wait for the container to reach Succeeded 07/29/23 17:06:26.515 -STEP: get the container status 07/29/23 17:06:30.553 -STEP: the container should be terminated 07/29/23 17:06:30.559 -STEP: the termination message should be set 07/29/23 17:06:30.559 -Jul 29 17:06:30.559: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- -STEP: delete the container 07/29/23 17:06:30.56 -[AfterEach] [sig-node] Container Runtime +[BeforeEach] [sig-node] InitContainer [NodeConformance] + test/e2e/common/node/init_container.go:165 +[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] + test/e2e/common/node/init_container.go:458 +STEP: creating the pod 08/24/23 13:17:04.966 +Aug 24 13:17:04.968: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:30.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Container Runtime +Aug 24 13:17:09.122: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Container Runtime +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Container Runtime +[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] tear down framework | framework.go:193 -STEP: Destroying namespace 
"container-runtime-4897" for this suite. 07/29/23 17:06:30.609 +STEP: Destroying namespace "init-container-8690" for this suite. 08/24/23 13:17:09.134 ------------------------------ -• [4.172 seconds] -[sig-node] Container Runtime +• [4.224 seconds] +[sig-node] InitContainer [NodeConformance] test/e2e/common/node/framework.go:23 - blackbox test - test/e2e/common/node/runtime.go:44 - on terminated container - test/e2e/common/node/runtime.go:137 - should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:195 + should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] + test/e2e/common/node/init_container.go:458 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Container Runtime + [BeforeEach] [sig-node] InitContainer [NodeConformance] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:26.449 - Jul 29 17:06:26.449: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename container-runtime 07/29/23 17:06:26.451 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:26.483 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:26.49 - [BeforeEach] [sig-node] Container Runtime + STEP: Creating a kubernetes client 08/24/23 13:17:04.923 + Aug 24 13:17:04.923: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename init-container 08/24/23 13:17:04.925 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:04.958 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:04.962 + [BeforeEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:31 - [It] should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] - test/e2e/common/node/runtime.go:195 - STEP: create the container 07/29/23 17:06:26.496 - STEP: wait for the container to reach Succeeded 07/29/23 17:06:26.515 - STEP: get the container status 07/29/23 17:06:30.553 - STEP: the container should be terminated 07/29/23 17:06:30.559 - STEP: the termination message should be set 07/29/23 17:06:30.559 - Jul 29 17:06:30.559: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- - STEP: delete the container 07/29/23 17:06:30.56 - [AfterEach] [sig-node] Container Runtime + [BeforeEach] [sig-node] InitContainer [NodeConformance] + test/e2e/common/node/init_container.go:165 + [It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] + test/e2e/common/node/init_container.go:458 + STEP: creating the pod 08/24/23 13:17:04.966 + Aug 24 13:17:04.968: INFO: PodSpec: initContainers in spec.initContainers + [AfterEach] [sig-node] InitContainer [NodeConformance] test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:30.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Container Runtime + Aug 24 13:17:09.122: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Container Runtime + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] dump 
namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Container Runtime + [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] tear down framework | framework.go:193 - STEP: Destroying namespace "container-runtime-4897" for this suite. 07/29/23 17:06:30.609 + STEP: Destroying namespace "init-container-8690" for this suite. 08/24/23 13:17:09.134 << End Captured GinkgoWriter Output ------------------------------ -SSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] Garbage collector - should delete pods created by rc when not orphaning [Conformance] - test/e2e/apimachinery/garbage_collector.go:312 + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + test/e2e/apimachinery/garbage_collector.go:735 [BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:30.623 -Jul 29 17:06:30.623: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename gc 07/29/23 17:06:30.627 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:30.659 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:30.664 +STEP: Creating a kubernetes client 08/24/23 13:17:09.15 +Aug 24 13:17:09.150: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename gc 08/24/23 13:17:09.152 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:09.189 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:09.194 [BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 -[It] should delete pods created by rc when not orphaning [Conformance] - test/e2e/apimachinery/garbage_collector.go:312 -STEP: create the rc 07/29/23 17:06:30.669 -STEP: delete the rc 07/29/23 17:06:35.693 -STEP: wait for all pods to be garbage collected 07/29/23 17:06:35.71 -STEP: Gathering metrics 07/29/23 17:06:40.721 -Jul 29 17:06:40.775: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" -Jul 29 17:06:40.782: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 6.606968ms -Jul 29 17:06:40.782: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) -Jul 29 17:06:40.782: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" -Jul 29 17:06:40.890: INFO: For apiserver_request_total: +[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + test/e2e/apimachinery/garbage_collector.go:735 +STEP: create the rc1 08/24/23 13:17:09.21 +STEP: create the rc2 08/24/23 13:17:09.219 +STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well 08/24/23 13:17:14.401 +STEP: delete the rc simpletest-rc-to-be-deleted 08/24/23 13:17:22.046 +STEP: wait for the rc to be deleted 08/24/23 13:17:22.211 +Aug 24 13:17:27.484: INFO: 95 pods remaining +Aug 24 13:17:27.484: INFO: 71 pods has nil DeletionTimestamp +Aug 24 13:17:27.485: INFO: +Aug 24 13:17:32.316: INFO: 81 pods remaining +Aug 24 13:17:32.316: INFO: 50 pods has nil DeletionTimestamp +Aug 24 13:17:32.317: INFO: +STEP: Gathering metrics 08/24/23 13:17:37.257 +Aug 24 13:17:37.701: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" +Aug 24 13:17:37.719: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. Elapsed: 17.684136ms +Aug 24 13:17:37.719: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) +Aug 24 13:17:37.720: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" +Aug 24 13:17:38.200: INFO: For apiserver_request_total: For apiserver_request_latency_seconds: For apiserver_init_events_total: For garbage_collector_attempt_to_delete_queue_latency: @@ -36153,44 +35895,102 @@ For function_duration_seconds: For errors_total: For evicted_pods_total: +Aug 24 13:17:38.201: INFO: Deleting pod "simpletest-rc-to-be-deleted-296xc" in namespace "gc-2444" +Aug 24 13:17:38.458: INFO: Deleting pod "simpletest-rc-to-be-deleted-2d5jb" in namespace "gc-2444" +Aug 24 13:17:38.593: INFO: Deleting pod "simpletest-rc-to-be-deleted-2mdvv" in namespace "gc-2444" +Aug 24 13:17:38.725: INFO: Deleting pod "simpletest-rc-to-be-deleted-2wd4l" in namespace "gc-2444" +Aug 24 13:17:38.802: INFO: Deleting pod "simpletest-rc-to-be-deleted-46fms" in namespace "gc-2444" +Aug 24 13:17:38.868: INFO: Deleting pod "simpletest-rc-to-be-deleted-4dndj" in namespace "gc-2444" +Aug 24 13:17:38.957: INFO: Deleting pod "simpletest-rc-to-be-deleted-4jxfj" in namespace "gc-2444" +Aug 24 13:17:39.009: INFO: Deleting pod "simpletest-rc-to-be-deleted-4mpf2" in namespace "gc-2444" +Aug 24 13:17:39.099: INFO: Deleting pod "simpletest-rc-to-be-deleted-4s9nn" in namespace "gc-2444" +Aug 24 13:17:39.263: INFO: Deleting pod "simpletest-rc-to-be-deleted-4wfkr" in namespace "gc-2444" +Aug 24 13:17:39.337: INFO: Deleting pod "simpletest-rc-to-be-deleted-4whcl" in namespace "gc-2444" +Aug 24 13:17:39.537: INFO: Deleting pod "simpletest-rc-to-be-deleted-4z2gj" in namespace "gc-2444" +Aug 24 13:17:39.584: INFO: Deleting pod "simpletest-rc-to-be-deleted-54vzx" in namespace "gc-2444" +Aug 24 13:17:39.653: INFO: Deleting pod "simpletest-rc-to-be-deleted-58gl8" in namespace "gc-2444" +Aug 24 13:17:39.710: INFO: Deleting pod "simpletest-rc-to-be-deleted-5nggh" in namespace "gc-2444" +Aug 24 13:17:39.774: INFO: Deleting pod "simpletest-rc-to-be-deleted-6htgx" in namespace "gc-2444" +Aug 
24 13:17:39.805: INFO: Deleting pod "simpletest-rc-to-be-deleted-6j6kn" in namespace "gc-2444" +Aug 24 13:17:39.917: INFO: Deleting pod "simpletest-rc-to-be-deleted-6kwvl" in namespace "gc-2444" +Aug 24 13:17:39.986: INFO: Deleting pod "simpletest-rc-to-be-deleted-6mx42" in namespace "gc-2444" +Aug 24 13:17:40.098: INFO: Deleting pod "simpletest-rc-to-be-deleted-6p4ct" in namespace "gc-2444" +Aug 24 13:17:40.231: INFO: Deleting pod "simpletest-rc-to-be-deleted-7pr76" in namespace "gc-2444" +Aug 24 13:17:40.352: INFO: Deleting pod "simpletest-rc-to-be-deleted-7tbzp" in namespace "gc-2444" +Aug 24 13:17:40.502: INFO: Deleting pod "simpletest-rc-to-be-deleted-8bscb" in namespace "gc-2444" +Aug 24 13:17:40.697: INFO: Deleting pod "simpletest-rc-to-be-deleted-8rf6n" in namespace "gc-2444" +Aug 24 13:17:40.802: INFO: Deleting pod "simpletest-rc-to-be-deleted-8xl8t" in namespace "gc-2444" +Aug 24 13:17:40.920: INFO: Deleting pod "simpletest-rc-to-be-deleted-98d5g" in namespace "gc-2444" +Aug 24 13:17:41.050: INFO: Deleting pod "simpletest-rc-to-be-deleted-9crll" in namespace "gc-2444" +Aug 24 13:17:41.156: INFO: Deleting pod "simpletest-rc-to-be-deleted-9fvrj" in namespace "gc-2444" +Aug 24 13:17:41.218: INFO: Deleting pod "simpletest-rc-to-be-deleted-9ksqm" in namespace "gc-2444" +Aug 24 13:17:41.265: INFO: Deleting pod "simpletest-rc-to-be-deleted-9t79h" in namespace "gc-2444" +Aug 24 13:17:41.312: INFO: Deleting pod "simpletest-rc-to-be-deleted-b9z4c" in namespace "gc-2444" +Aug 24 13:17:41.389: INFO: Deleting pod "simpletest-rc-to-be-deleted-bjgz4" in namespace "gc-2444" +Aug 24 13:17:41.480: INFO: Deleting pod "simpletest-rc-to-be-deleted-bsrx7" in namespace "gc-2444" +Aug 24 13:17:41.553: INFO: Deleting pod "simpletest-rc-to-be-deleted-btkfw" in namespace "gc-2444" +Aug 24 13:17:41.659: INFO: Deleting pod "simpletest-rc-to-be-deleted-c4hm7" in namespace "gc-2444" +Aug 24 13:17:41.715: INFO: Deleting pod "simpletest-rc-to-be-deleted-c9m4f" in namespace "gc-2444" +Aug 24 13:17:41.782: INFO: Deleting pod "simpletest-rc-to-be-deleted-ch6b6" in namespace "gc-2444" +Aug 24 13:17:41.823: INFO: Deleting pod "simpletest-rc-to-be-deleted-cjqkl" in namespace "gc-2444" +Aug 24 13:17:41.851: INFO: Deleting pod "simpletest-rc-to-be-deleted-d82qt" in namespace "gc-2444" +Aug 24 13:17:41.925: INFO: Deleting pod "simpletest-rc-to-be-deleted-dhkjk" in namespace "gc-2444" +Aug 24 13:17:41.997: INFO: Deleting pod "simpletest-rc-to-be-deleted-djhmp" in namespace "gc-2444" +Aug 24 13:17:42.042: INFO: Deleting pod "simpletest-rc-to-be-deleted-f4xsv" in namespace "gc-2444" +Aug 24 13:17:42.152: INFO: Deleting pod "simpletest-rc-to-be-deleted-ff6nx" in namespace "gc-2444" +Aug 24 13:17:42.248: INFO: Deleting pod "simpletest-rc-to-be-deleted-fr55f" in namespace "gc-2444" +Aug 24 13:17:42.352: INFO: Deleting pod "simpletest-rc-to-be-deleted-fxwgm" in namespace "gc-2444" +Aug 24 13:17:42.436: INFO: Deleting pod "simpletest-rc-to-be-deleted-fz7qc" in namespace "gc-2444" +Aug 24 13:17:42.552: INFO: Deleting pod "simpletest-rc-to-be-deleted-g6wwk" in namespace "gc-2444" +Aug 24 13:17:42.622: INFO: Deleting pod "simpletest-rc-to-be-deleted-g8nxt" in namespace "gc-2444" +Aug 24 13:17:42.669: INFO: Deleting pod "simpletest-rc-to-be-deleted-gc5mt" in namespace "gc-2444" +Aug 24 13:17:42.729: INFO: Deleting pod "simpletest-rc-to-be-deleted-gggjm" in namespace "gc-2444" [AfterEach] [sig-api-machinery] Garbage collector test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:40.890: INFO: Waiting up to 3m0s for all (but 0) nodes 
to be ready +Aug 24 13:17:42.845: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 -STEP: Destroying namespace "gc-5285" for this suite. 07/29/23 17:06:40.899 +STEP: Destroying namespace "gc-2444" for this suite. 08/24/23 13:17:42.862 ------------------------------ -• [SLOW TEST] [10.288 seconds] +• [SLOW TEST] [33.794 seconds] [sig-api-machinery] Garbage collector test/e2e/apimachinery/framework.go:23 - should delete pods created by rc when not orphaning [Conformance] - test/e2e/apimachinery/garbage_collector.go:312 + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + test/e2e/apimachinery/garbage_collector.go:735 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-api-machinery] Garbage collector set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:30.623 - Jul 29 17:06:30.623: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename gc 07/29/23 17:06:30.627 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:30.659 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:30.664 + STEP: Creating a kubernetes client 08/24/23 13:17:09.15 + Aug 24 13:17:09.150: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename gc 08/24/23 13:17:09.152 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:09.189 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:09.194 [BeforeEach] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:31 - [It] should delete pods created by rc when not orphaning [Conformance] - test/e2e/apimachinery/garbage_collector.go:312 - STEP: create the rc 07/29/23 17:06:30.669 - STEP: delete the rc 07/29/23 17:06:35.693 - STEP: wait for all pods to be garbage collected 07/29/23 17:06:35.71 - STEP: Gathering metrics 07/29/23 17:06:40.721 - Jul 29 17:06:40.775: INFO: Waiting up to 5m0s for pod "kube-controller-manager-wetuj3nuajog-2" in namespace "kube-system" to be "running and ready" - Jul 29 17:06:40.782: INFO: Pod "kube-controller-manager-wetuj3nuajog-2": Phase="Running", Reason="", readiness=true. 
Elapsed: 6.606968ms - Jul 29 17:06:40.782: INFO: The phase of Pod kube-controller-manager-wetuj3nuajog-2 is Running (Ready = true) - Jul 29 17:06:40.782: INFO: Pod "kube-controller-manager-wetuj3nuajog-2" satisfied condition "running and ready" - Jul 29 17:06:40.890: INFO: For apiserver_request_total: + [It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + test/e2e/apimachinery/garbage_collector.go:735 + STEP: create the rc1 08/24/23 13:17:09.21 + STEP: create the rc2 08/24/23 13:17:09.219 + STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well 08/24/23 13:17:14.401 + STEP: delete the rc simpletest-rc-to-be-deleted 08/24/23 13:17:22.046 + STEP: wait for the rc to be deleted 08/24/23 13:17:22.211 + Aug 24 13:17:27.484: INFO: 95 pods remaining + Aug 24 13:17:27.484: INFO: 71 pods has nil DeletionTimestamp + Aug 24 13:17:27.485: INFO: + Aug 24 13:17:32.316: INFO: 81 pods remaining + Aug 24 13:17:32.316: INFO: 50 pods has nil DeletionTimestamp + Aug 24 13:17:32.317: INFO: + STEP: Gathering metrics 08/24/23 13:17:37.257 + Aug 24 13:17:37.701: INFO: Waiting up to 5m0s for pod "kube-controller-manager-pe9deep4seen-2" in namespace "kube-system" to be "running and ready" + Aug 24 13:17:37.719: INFO: Pod "kube-controller-manager-pe9deep4seen-2": Phase="Running", Reason="", readiness=true. Elapsed: 17.684136ms + Aug 24 13:17:37.719: INFO: The phase of Pod kube-controller-manager-pe9deep4seen-2 is Running (Ready = true) + Aug 24 13:17:37.720: INFO: Pod "kube-controller-manager-pe9deep4seen-2" satisfied condition "running and ready" + Aug 24 13:17:38.200: INFO: For apiserver_request_total: For apiserver_request_latency_seconds: For apiserver_init_events_total: For garbage_collector_attempt_to_delete_queue_latency: @@ -36213,385 +36013,313 @@ test/e2e/apimachinery/framework.go:23 For errors_total: For evicted_pods_total: + Aug 24 13:17:38.201: INFO: Deleting pod "simpletest-rc-to-be-deleted-296xc" in namespace "gc-2444" + Aug 24 13:17:38.458: INFO: Deleting pod "simpletest-rc-to-be-deleted-2d5jb" in namespace "gc-2444" + Aug 24 13:17:38.593: INFO: Deleting pod "simpletest-rc-to-be-deleted-2mdvv" in namespace "gc-2444" + Aug 24 13:17:38.725: INFO: Deleting pod "simpletest-rc-to-be-deleted-2wd4l" in namespace "gc-2444" + Aug 24 13:17:38.802: INFO: Deleting pod "simpletest-rc-to-be-deleted-46fms" in namespace "gc-2444" + Aug 24 13:17:38.868: INFO: Deleting pod "simpletest-rc-to-be-deleted-4dndj" in namespace "gc-2444" + Aug 24 13:17:38.957: INFO: Deleting pod "simpletest-rc-to-be-deleted-4jxfj" in namespace "gc-2444" + Aug 24 13:17:39.009: INFO: Deleting pod "simpletest-rc-to-be-deleted-4mpf2" in namespace "gc-2444" + Aug 24 13:17:39.099: INFO: Deleting pod "simpletest-rc-to-be-deleted-4s9nn" in namespace "gc-2444" + Aug 24 13:17:39.263: INFO: Deleting pod "simpletest-rc-to-be-deleted-4wfkr" in namespace "gc-2444" + Aug 24 13:17:39.337: INFO: Deleting pod "simpletest-rc-to-be-deleted-4whcl" in namespace "gc-2444" + Aug 24 13:17:39.537: INFO: Deleting pod "simpletest-rc-to-be-deleted-4z2gj" in namespace "gc-2444" + Aug 24 13:17:39.584: INFO: Deleting pod "simpletest-rc-to-be-deleted-54vzx" in namespace "gc-2444" + Aug 24 13:17:39.653: INFO: Deleting pod "simpletest-rc-to-be-deleted-58gl8" in namespace "gc-2444" + Aug 24 13:17:39.710: INFO: Deleting pod "simpletest-rc-to-be-deleted-5nggh" in namespace "gc-2444" + Aug 24 13:17:39.774: INFO: Deleting pod 
"simpletest-rc-to-be-deleted-6htgx" in namespace "gc-2444" + Aug 24 13:17:39.805: INFO: Deleting pod "simpletest-rc-to-be-deleted-6j6kn" in namespace "gc-2444" + Aug 24 13:17:39.917: INFO: Deleting pod "simpletest-rc-to-be-deleted-6kwvl" in namespace "gc-2444" + Aug 24 13:17:39.986: INFO: Deleting pod "simpletest-rc-to-be-deleted-6mx42" in namespace "gc-2444" + Aug 24 13:17:40.098: INFO: Deleting pod "simpletest-rc-to-be-deleted-6p4ct" in namespace "gc-2444" + Aug 24 13:17:40.231: INFO: Deleting pod "simpletest-rc-to-be-deleted-7pr76" in namespace "gc-2444" + Aug 24 13:17:40.352: INFO: Deleting pod "simpletest-rc-to-be-deleted-7tbzp" in namespace "gc-2444" + Aug 24 13:17:40.502: INFO: Deleting pod "simpletest-rc-to-be-deleted-8bscb" in namespace "gc-2444" + Aug 24 13:17:40.697: INFO: Deleting pod "simpletest-rc-to-be-deleted-8rf6n" in namespace "gc-2444" + Aug 24 13:17:40.802: INFO: Deleting pod "simpletest-rc-to-be-deleted-8xl8t" in namespace "gc-2444" + Aug 24 13:17:40.920: INFO: Deleting pod "simpletest-rc-to-be-deleted-98d5g" in namespace "gc-2444" + Aug 24 13:17:41.050: INFO: Deleting pod "simpletest-rc-to-be-deleted-9crll" in namespace "gc-2444" + Aug 24 13:17:41.156: INFO: Deleting pod "simpletest-rc-to-be-deleted-9fvrj" in namespace "gc-2444" + Aug 24 13:17:41.218: INFO: Deleting pod "simpletest-rc-to-be-deleted-9ksqm" in namespace "gc-2444" + Aug 24 13:17:41.265: INFO: Deleting pod "simpletest-rc-to-be-deleted-9t79h" in namespace "gc-2444" + Aug 24 13:17:41.312: INFO: Deleting pod "simpletest-rc-to-be-deleted-b9z4c" in namespace "gc-2444" + Aug 24 13:17:41.389: INFO: Deleting pod "simpletest-rc-to-be-deleted-bjgz4" in namespace "gc-2444" + Aug 24 13:17:41.480: INFO: Deleting pod "simpletest-rc-to-be-deleted-bsrx7" in namespace "gc-2444" + Aug 24 13:17:41.553: INFO: Deleting pod "simpletest-rc-to-be-deleted-btkfw" in namespace "gc-2444" + Aug 24 13:17:41.659: INFO: Deleting pod "simpletest-rc-to-be-deleted-c4hm7" in namespace "gc-2444" + Aug 24 13:17:41.715: INFO: Deleting pod "simpletest-rc-to-be-deleted-c9m4f" in namespace "gc-2444" + Aug 24 13:17:41.782: INFO: Deleting pod "simpletest-rc-to-be-deleted-ch6b6" in namespace "gc-2444" + Aug 24 13:17:41.823: INFO: Deleting pod "simpletest-rc-to-be-deleted-cjqkl" in namespace "gc-2444" + Aug 24 13:17:41.851: INFO: Deleting pod "simpletest-rc-to-be-deleted-d82qt" in namespace "gc-2444" + Aug 24 13:17:41.925: INFO: Deleting pod "simpletest-rc-to-be-deleted-dhkjk" in namespace "gc-2444" + Aug 24 13:17:41.997: INFO: Deleting pod "simpletest-rc-to-be-deleted-djhmp" in namespace "gc-2444" + Aug 24 13:17:42.042: INFO: Deleting pod "simpletest-rc-to-be-deleted-f4xsv" in namespace "gc-2444" + Aug 24 13:17:42.152: INFO: Deleting pod "simpletest-rc-to-be-deleted-ff6nx" in namespace "gc-2444" + Aug 24 13:17:42.248: INFO: Deleting pod "simpletest-rc-to-be-deleted-fr55f" in namespace "gc-2444" + Aug 24 13:17:42.352: INFO: Deleting pod "simpletest-rc-to-be-deleted-fxwgm" in namespace "gc-2444" + Aug 24 13:17:42.436: INFO: Deleting pod "simpletest-rc-to-be-deleted-fz7qc" in namespace "gc-2444" + Aug 24 13:17:42.552: INFO: Deleting pod "simpletest-rc-to-be-deleted-g6wwk" in namespace "gc-2444" + Aug 24 13:17:42.622: INFO: Deleting pod "simpletest-rc-to-be-deleted-g8nxt" in namespace "gc-2444" + Aug 24 13:17:42.669: INFO: Deleting pod "simpletest-rc-to-be-deleted-gc5mt" in namespace "gc-2444" + Aug 24 13:17:42.729: INFO: Deleting pod "simpletest-rc-to-be-deleted-gggjm" in namespace "gc-2444" [AfterEach] [sig-api-machinery] Garbage collector 
test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:40.890: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 13:17:42.845: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-api-machinery] Garbage collector test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-api-machinery] Garbage collector dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] Garbage collector tear down framework | framework.go:193 - STEP: Destroying namespace "gc-5285" for this suite. 07/29/23 17:06:40.899 - << End Captured GinkgoWriter Output ------------------------------- -SSSSSSSSSSS ------------------------------- -[sig-cli] Kubectl client Kubectl cluster-info - should check if Kubernetes control plane services is included in cluster-info [Conformance] - test/e2e/kubectl/kubectl.go:1250 -[BeforeEach] [sig-cli] Kubectl client - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:40.92 -Jul 29 17:06:40.920: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename kubectl 07/29/23 17:06:40.923 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:40.96 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:40.964 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 -[It] should check if Kubernetes control plane services is included in cluster-info [Conformance] - test/e2e/kubectl/kubectl.go:1250 -STEP: validating cluster-info 07/29/23 17:06:40.969 -Jul 29 17:06:40.970: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1013 cluster-info' -Jul 29 17:06:41.103: INFO: stderr: "" -Jul 29 17:06:41.103: INFO: stdout: "\x1b[0;32mKubernetes control plane\x1b[0m is running at \x1b[0;33mhttps://10.233.0.1:443\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" -[AfterEach] [sig-cli] Kubectl client - test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:41.103: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-cli] Kubectl client - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-cli] Kubectl client - tear down framework | framework.go:193 -STEP: Destroying namespace "kubectl-1013" for this suite. 
07/29/23 17:06:41.11 ------------------------------- -• [0.203 seconds] -[sig-cli] Kubectl client -test/e2e/kubectl/framework.go:23 - Kubectl cluster-info - test/e2e/kubectl/kubectl.go:1244 - should check if Kubernetes control plane services is included in cluster-info [Conformance] - test/e2e/kubectl/kubectl.go:1250 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-cli] Kubectl client - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:40.92 - Jul 29 17:06:40.920: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename kubectl 07/29/23 17:06:40.923 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:40.96 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:40.964 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-cli] Kubectl client - test/e2e/kubectl/kubectl.go:274 - [It] should check if Kubernetes control plane services is included in cluster-info [Conformance] - test/e2e/kubectl/kubectl.go:1250 - STEP: validating cluster-info 07/29/23 17:06:40.969 - Jul 29 17:06:40.970: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=kubectl-1013 cluster-info' - Jul 29 17:06:41.103: INFO: stderr: "" - Jul 29 17:06:41.103: INFO: stdout: "\x1b[0;32mKubernetes control plane\x1b[0m is running at \x1b[0;33mhttps://10.233.0.1:443\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" - [AfterEach] [sig-cli] Kubectl client - test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:41.103: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-cli] Kubectl client - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-cli] Kubectl client - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-cli] Kubectl client - tear down framework | framework.go:193 - STEP: Destroying namespace "kubectl-1013" for this suite. 07/29/23 17:06:41.11 - << End Captured GinkgoWriter Output ------------------------------- -S ------------------------------- -[sig-storage] EmptyDir volumes - volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:87 -[BeforeEach] [sig-storage] EmptyDir volumes - set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:41.129 -Jul 29 17:06:41.129: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename emptydir 07/29/23 17:06:41.131 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:41.164 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:41.168 -[BeforeEach] [sig-storage] EmptyDir volumes - test/e2e/framework/metrics/init/init.go:31 -[It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:87 -STEP: Creating a pod to test emptydir volume type on tmpfs 07/29/23 17:06:41.172 -Jul 29 17:06:41.185: INFO: Waiting up to 5m0s for pod "pod-87c42215-d883-4a44-a833-346cc63378a8" in namespace "emptydir-6552" to be "Succeeded or Failed" -Jul 29 17:06:41.192: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 7.194475ms -Jul 29 17:06:43.201: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8": Phase="Running", Reason="", readiness=true. Elapsed: 2.015761476s -Jul 29 17:06:45.202: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8": Phase="Running", Reason="", readiness=false. Elapsed: 4.016891295s -Jul 29 17:06:47.200: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.014587734s -STEP: Saw pod success 07/29/23 17:06:47.2 -Jul 29 17:06:47.200: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8" satisfied condition "Succeeded or Failed" -Jul 29 17:06:47.208: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-87c42215-d883-4a44-a833-346cc63378a8 container test-container: -STEP: delete the pod 07/29/23 17:06:47.218 -Jul 29 17:06:47.238: INFO: Waiting for pod pod-87c42215-d883-4a44-a833-346cc63378a8 to disappear -Jul 29 17:06:47.244: INFO: Pod pod-87c42215-d883-4a44-a833-346cc63378a8 no longer exists -[AfterEach] [sig-storage] EmptyDir volumes - test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:47.244: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes - test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes - dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] EmptyDir volumes - tear down framework | framework.go:193 -STEP: Destroying namespace "emptydir-6552" for this suite. 07/29/23 17:06:47.251 ------------------------------- -• [SLOW TEST] [6.136 seconds] -[sig-storage] EmptyDir volumes -test/e2e/common/storage/framework.go:23 - volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:87 - - Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] EmptyDir volumes - set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:41.129 - Jul 29 17:06:41.129: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename emptydir 07/29/23 17:06:41.131 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:41.164 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:41.168 - [BeforeEach] [sig-storage] EmptyDir volumes - test/e2e/framework/metrics/init/init.go:31 - [It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] - test/e2e/common/storage/empty_dir.go:87 - STEP: Creating a pod to test emptydir volume type on tmpfs 07/29/23 17:06:41.172 - Jul 29 17:06:41.185: INFO: Waiting up to 5m0s for pod "pod-87c42215-d883-4a44-a833-346cc63378a8" in namespace "emptydir-6552" to be "Succeeded or Failed" - Jul 29 17:06:41.192: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8": Phase="Pending", Reason="", readiness=false. Elapsed: 7.194475ms - Jul 29 17:06:43.201: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8": Phase="Running", Reason="", readiness=true. Elapsed: 2.015761476s - Jul 29 17:06:45.202: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8": Phase="Running", Reason="", readiness=false. Elapsed: 4.016891295s - Jul 29 17:06:47.200: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.014587734s - STEP: Saw pod success 07/29/23 17:06:47.2 - Jul 29 17:06:47.200: INFO: Pod "pod-87c42215-d883-4a44-a833-346cc63378a8" satisfied condition "Succeeded or Failed" - Jul 29 17:06:47.208: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-87c42215-d883-4a44-a833-346cc63378a8 container test-container: - STEP: delete the pod 07/29/23 17:06:47.218 - Jul 29 17:06:47.238: INFO: Waiting for pod pod-87c42215-d883-4a44-a833-346cc63378a8 to disappear - Jul 29 17:06:47.244: INFO: Pod pod-87c42215-d883-4a44-a833-346cc63378a8 no longer exists - [AfterEach] [sig-storage] EmptyDir volumes - test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:47.244: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes - test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes - dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] EmptyDir volumes - tear down framework | framework.go:193 - STEP: Destroying namespace "emptydir-6552" for this suite. 07/29/23 17:06:47.251 + STEP: Destroying namespace "gc-2444" for this suite. 08/24/23 13:17:42.862 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSS +SSSSSSSSSSSSSSS ------------------------------ -[sig-api-machinery] Watchers - should be able to restart watching from the last resource version observed by the previous watch [Conformance] - test/e2e/apimachinery/watch.go:191 -[BeforeEach] [sig-api-machinery] Watchers +[sig-network] Services + should provide secure master service [Conformance] + test/e2e/network/service.go:777 +[BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:47.267 -Jul 29 17:06:47.268: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename watch 07/29/23 17:06:47.27 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:47.3 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:47.305 -[BeforeEach] [sig-api-machinery] Watchers +STEP: Creating a kubernetes client 08/24/23 13:17:42.948 +Aug 24 13:17:42.949: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 13:17:42.957 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:43.052 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:43.084 +[BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 -[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance] - test/e2e/apimachinery/watch.go:191 -STEP: creating a watch on configmaps 07/29/23 17:06:47.314 -STEP: creating a new configmap 07/29/23 17:06:47.324 -STEP: modifying the configmap once 07/29/23 17:06:47.331 -STEP: closing the watch once it receives two notifications 07/29/23 17:06:47.344 -Jul 29 17:06:47.344: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-3066 9fa393ac-7c76-4114-9254-a9617c803ef3 40237 0 2023-07-29 17:06:47 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-07-29 17:06:47 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 17:06:47.345: INFO: Got : MODIFIED 
&ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-3066 9fa393ac-7c76-4114-9254-a9617c803ef3 40238 0 2023-07-29 17:06:47 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-07-29 17:06:47 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} -STEP: modifying the configmap a second time, while the watch is closed 07/29/23 17:06:47.345 -STEP: creating a new watch on configmaps from the last resource version observed by the first watch 07/29/23 17:06:47.358 -STEP: deleting the configmap 07/29/23 17:06:47.361 -STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed 07/29/23 17:06:47.375 -Jul 29 17:06:47.376: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-3066 9fa393ac-7c76-4114-9254-a9617c803ef3 40239 0 2023-07-29 17:06:47 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-07-29 17:06:47 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -Jul 29 17:06:47.376: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-3066 9fa393ac-7c76-4114-9254-a9617c803ef3 40240 0 2023-07-29 17:06:47 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-07-29 17:06:47 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} -[AfterEach] [sig-api-machinery] Watchers +[BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 +[It] should provide secure master service [Conformance] + test/e2e/network/service.go:777 +[AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:47.377: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-api-machinery] Watchers +Aug 24 13:17:43.112: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] Watchers +[DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "watch-3066" for this suite. 07/29/23 17:06:47.387 +STEP: Destroying namespace "services-6666" for this suite. 
08/24/23 13:17:43.143 ------------------------------ -• [0.129 seconds] -[sig-api-machinery] Watchers -test/e2e/apimachinery/framework.go:23 - should be able to restart watching from the last resource version observed by the previous watch [Conformance] - test/e2e/apimachinery/watch.go:191 +• [0.246 seconds] +[sig-network] Services +test/e2e/network/common/framework.go:23 + should provide secure master service [Conformance] + test/e2e/network/service.go:777 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] Watchers + [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:47.267 - Jul 29 17:06:47.268: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename watch 07/29/23 17:06:47.27 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:47.3 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:47.305 - [BeforeEach] [sig-api-machinery] Watchers + STEP: Creating a kubernetes client 08/24/23 13:17:42.948 + Aug 24 13:17:42.949: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 13:17:42.957 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:43.052 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:43.084 + [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 - [It] should be able to restart watching from the last resource version observed by the previous watch [Conformance] - test/e2e/apimachinery/watch.go:191 - STEP: creating a watch on configmaps 07/29/23 17:06:47.314 - STEP: creating a new configmap 07/29/23 17:06:47.324 - STEP: modifying the configmap once 07/29/23 17:06:47.331 - STEP: closing the watch once it receives two notifications 07/29/23 17:06:47.344 - Jul 29 17:06:47.344: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-3066 9fa393ac-7c76-4114-9254-a9617c803ef3 40237 0 2023-07-29 17:06:47 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-07-29 17:06:47 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 17:06:47.345: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-3066 9fa393ac-7c76-4114-9254-a9617c803ef3 40238 0 2023-07-29 17:06:47 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-07-29 17:06:47 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} - STEP: modifying the configmap a second time, while the watch is closed 07/29/23 17:06:47.345 - STEP: creating a new watch on configmaps from the last resource version observed by the first watch 07/29/23 17:06:47.358 - STEP: deleting the configmap 07/29/23 17:06:47.361 - STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed 07/29/23 17:06:47.375 - Jul 29 17:06:47.376: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-3066 9fa393ac-7c76-4114-9254-a9617c803ef3 40239 0 2023-07-29 17:06:47 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 
2023-07-29 17:06:47 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} - Jul 29 17:06:47.376: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-3066 9fa393ac-7c76-4114-9254-a9617c803ef3 40240 0 2023-07-29 17:06:47 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2023-07-29 17:06:47 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} - [AfterEach] [sig-api-machinery] Watchers + [BeforeEach] [sig-network] Services + test/e2e/network/service.go:766 + [It] should provide secure master service [Conformance] + test/e2e/network/service.go:777 + [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:47.377: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-api-machinery] Watchers + Aug 24 13:17:43.112: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] Watchers + [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "watch-3066" for this suite. 07/29/23 17:06:47.387 + STEP: Destroying namespace "services-6666" for this suite. 08/24/23 13:17:43.143 << End Captured GinkgoWriter Output ------------------------------ -SSSSSS +SSSSSSS ------------------------------ [sig-network] Services - should be able to create a functioning NodePort service [Conformance] - test/e2e/network/service.go:1302 + should test the lifecycle of an Endpoint [Conformance] + test/e2e/network/service.go:3244 [BeforeEach] [sig-network] Services set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:47.4 -Jul 29 17:06:47.401: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename services 07/29/23 17:06:47.403 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:47.434 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:47.439 +STEP: Creating a kubernetes client 08/24/23 13:17:43.195 +Aug 24 13:17:43.195: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename services 08/24/23 13:17:43.198 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:43.251 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:43.265 [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] Services test/e2e/network/service.go:766 -[It] should be able to create a functioning NodePort service [Conformance] - test/e2e/network/service.go:1302 -STEP: creating service nodeport-test with type=NodePort in namespace services-7009 07/29/23 17:06:47.443 -STEP: creating replication controller nodeport-test in namespace services-7009 07/29/23 17:06:47.474 -I0729 17:06:47.489419 13 runners.go:193] Created replication controller with name: nodeport-test, namespace: services-7009, replica count: 2 
-I0729 17:06:50.541507 13 runners.go:193] nodeport-test Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady -Jul 29 17:06:50.541: INFO: Creating new exec pod -Jul 29 17:06:50.560: INFO: Waiting up to 5m0s for pod "execpodnjs4l" in namespace "services-7009" to be "running" -Jul 29 17:06:50.572: INFO: Pod "execpodnjs4l": Phase="Pending", Reason="", readiness=false. Elapsed: 11.859813ms -Jul 29 17:06:52.584: INFO: Pod "execpodnjs4l": Phase="Running", Reason="", readiness=true. Elapsed: 2.023886783s -Jul 29 17:06:52.584: INFO: Pod "execpodnjs4l" satisfied condition "running" -Jul 29 17:06:53.604: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7009 exec execpodnjs4l -- /bin/sh -x -c nc -v -z -w 2 nodeport-test 80' -Jul 29 17:06:53.884: INFO: stderr: "+ nc -v -z -w 2 nodeport-test 80\nConnection to nodeport-test 80 port [tcp/http] succeeded!\n" -Jul 29 17:06:53.884: INFO: stdout: "" -Jul 29 17:06:53.885: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7009 exec execpodnjs4l -- /bin/sh -x -c nc -v -z -w 2 10.233.53.157 80' -Jul 29 17:06:54.123: INFO: stderr: "+ nc -v -z -w 2 10.233.53.157 80\nConnection to 10.233.53.157 80 port [tcp/http] succeeded!\n" -Jul 29 17:06:54.123: INFO: stdout: "" -Jul 29 17:06:54.124: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7009 exec execpodnjs4l -- /bin/sh -x -c nc -v -z -w 2 192.168.121.120 30825' -Jul 29 17:06:54.360: INFO: stderr: "+ nc -v -z -w 2 192.168.121.120 30825\nConnection to 192.168.121.120 30825 port [tcp/*] succeeded!\n" -Jul 29 17:06:54.360: INFO: stdout: "" -Jul 29 17:06:54.361: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7009 exec execpodnjs4l -- /bin/sh -x -c nc -v -z -w 2 192.168.121.211 30825' -Jul 29 17:06:54.616: INFO: stderr: "+ nc -v -z -w 2 192.168.121.211 30825\nConnection to 192.168.121.211 30825 port [tcp/*] succeeded!\n" -Jul 29 17:06:54.616: INFO: stdout: "" +[It] should test the lifecycle of an Endpoint [Conformance] + test/e2e/network/service.go:3244 +STEP: creating an Endpoint 08/24/23 13:17:43.371 +STEP: waiting for available Endpoint 08/24/23 13:17:43.39 +STEP: listing all Endpoints 08/24/23 13:17:43.395 +STEP: updating the Endpoint 08/24/23 13:17:43.574 +STEP: fetching the Endpoint 08/24/23 13:17:43.597 +STEP: patching the Endpoint 08/24/23 13:17:43.616 +STEP: fetching the Endpoint 08/24/23 13:17:43.654 +STEP: deleting the Endpoint by Collection 08/24/23 13:17:43.666 +STEP: waiting for Endpoint deletion 08/24/23 13:17:43.709 +STEP: fetching the Endpoint 08/24/23 13:17:43.717 [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:54.616: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 13:17:43.728: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 -STEP: Destroying namespace "services-7009" for this suite. 07/29/23 17:06:54.626 +STEP: Destroying namespace "services-8150" for this suite. 
08/24/23 13:17:43.75 ------------------------------ -• [SLOW TEST] [7.239 seconds] +• [0.573 seconds] [sig-network] Services test/e2e/network/common/framework.go:23 - should be able to create a functioning NodePort service [Conformance] - test/e2e/network/service.go:1302 + should test the lifecycle of an Endpoint [Conformance] + test/e2e/network/service.go:3244 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-network] Services set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:47.4 - Jul 29 17:06:47.401: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename services 07/29/23 17:06:47.403 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:47.434 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:47.439 + STEP: Creating a kubernetes client 08/24/23 13:17:43.195 + Aug 24 13:17:43.195: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename services 08/24/23 13:17:43.198 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:43.251 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:43.265 [BeforeEach] [sig-network] Services test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] Services test/e2e/network/service.go:766 - [It] should be able to create a functioning NodePort service [Conformance] - test/e2e/network/service.go:1302 - STEP: creating service nodeport-test with type=NodePort in namespace services-7009 07/29/23 17:06:47.443 - STEP: creating replication controller nodeport-test in namespace services-7009 07/29/23 17:06:47.474 - I0729 17:06:47.489419 13 runners.go:193] Created replication controller with name: nodeport-test, namespace: services-7009, replica count: 2 - I0729 17:06:50.541507 13 runners.go:193] nodeport-test Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady - Jul 29 17:06:50.541: INFO: Creating new exec pod - Jul 29 17:06:50.560: INFO: Waiting up to 5m0s for pod "execpodnjs4l" in namespace "services-7009" to be "running" - Jul 29 17:06:50.572: INFO: Pod "execpodnjs4l": Phase="Pending", Reason="", readiness=false. Elapsed: 11.859813ms - Jul 29 17:06:52.584: INFO: Pod "execpodnjs4l": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.023886783s - Jul 29 17:06:52.584: INFO: Pod "execpodnjs4l" satisfied condition "running" - Jul 29 17:06:53.604: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7009 exec execpodnjs4l -- /bin/sh -x -c nc -v -z -w 2 nodeport-test 80' - Jul 29 17:06:53.884: INFO: stderr: "+ nc -v -z -w 2 nodeport-test 80\nConnection to nodeport-test 80 port [tcp/http] succeeded!\n" - Jul 29 17:06:53.884: INFO: stdout: "" - Jul 29 17:06:53.885: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7009 exec execpodnjs4l -- /bin/sh -x -c nc -v -z -w 2 10.233.53.157 80' - Jul 29 17:06:54.123: INFO: stderr: "+ nc -v -z -w 2 10.233.53.157 80\nConnection to 10.233.53.157 80 port [tcp/http] succeeded!\n" - Jul 29 17:06:54.123: INFO: stdout: "" - Jul 29 17:06:54.124: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7009 exec execpodnjs4l -- /bin/sh -x -c nc -v -z -w 2 192.168.121.120 30825' - Jul 29 17:06:54.360: INFO: stderr: "+ nc -v -z -w 2 192.168.121.120 30825\nConnection to 192.168.121.120 30825 port [tcp/*] succeeded!\n" - Jul 29 17:06:54.360: INFO: stdout: "" - Jul 29 17:06:54.361: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-3177299396 --namespace=services-7009 exec execpodnjs4l -- /bin/sh -x -c nc -v -z -w 2 192.168.121.211 30825' - Jul 29 17:06:54.616: INFO: stderr: "+ nc -v -z -w 2 192.168.121.211 30825\nConnection to 192.168.121.211 30825 port [tcp/*] succeeded!\n" - Jul 29 17:06:54.616: INFO: stdout: "" + [It] should test the lifecycle of an Endpoint [Conformance] + test/e2e/network/service.go:3244 + STEP: creating an Endpoint 08/24/23 13:17:43.371 + STEP: waiting for available Endpoint 08/24/23 13:17:43.39 + STEP: listing all Endpoints 08/24/23 13:17:43.395 + STEP: updating the Endpoint 08/24/23 13:17:43.574 + STEP: fetching the Endpoint 08/24/23 13:17:43.597 + STEP: patching the Endpoint 08/24/23 13:17:43.616 + STEP: fetching the Endpoint 08/24/23 13:17:43.654 + STEP: deleting the Endpoint by Collection 08/24/23 13:17:43.666 + STEP: waiting for Endpoint deletion 08/24/23 13:17:43.709 + STEP: fetching the Endpoint 08/24/23 13:17:43.717 [AfterEach] [sig-network] Services test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:54.616: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 13:17:43.728: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-network] Services test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] Services dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-network] Services tear down framework | framework.go:193 - STEP: Destroying namespace "services-7009" for this suite. 07/29/23 17:06:54.626 + STEP: Destroying namespace "services-8150" for this suite. 
08/24/23 13:17:43.75 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSS +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Job + should delete a job [Conformance] + test/e2e/apps/job.go:481 +[BeforeEach] [sig-apps] Job + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 13:17:43.772 +Aug 24 13:17:43.773: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename job 08/24/23 13:17:43.786 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:43.833 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:43.838 +[BeforeEach] [sig-apps] Job + test/e2e/framework/metrics/init/init.go:31 +[It] should delete a job [Conformance] + test/e2e/apps/job.go:481 +STEP: Creating a job 08/24/23 13:17:43.844 +STEP: Ensuring active pods == parallelism 08/24/23 13:17:43.862 +STEP: delete a job 08/24/23 13:17:47.877 +STEP: deleting Job.batch foo in namespace job-6907, will wait for the garbage collector to delete the pods 08/24/23 13:17:47.877 +Aug 24 13:17:47.995: INFO: Deleting Job.batch foo took: 57.808718ms +Aug 24 13:17:48.196: INFO: Terminating Job.batch foo pods took: 201.192085ms +STEP: Ensuring job was deleted 08/24/23 13:18:19.997 +[AfterEach] [sig-apps] Job + test/e2e/framework/node/init/init.go:32 +Aug 24 13:18:20.004: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] Job + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-apps] Job + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-apps] Job + tear down framework | framework.go:193 +STEP: Destroying namespace "job-6907" for this suite. 08/24/23 13:18:20.012 +------------------------------ +• [SLOW TEST] [36.251 seconds] +[sig-apps] Job +test/e2e/apps/framework.go:23 + should delete a job [Conformance] + test/e2e/apps/job.go:481 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-apps] Job + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 13:17:43.772 + Aug 24 13:17:43.773: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename job 08/24/23 13:17:43.786 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:17:43.833 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:17:43.838 + [BeforeEach] [sig-apps] Job + test/e2e/framework/metrics/init/init.go:31 + [It] should delete a job [Conformance] + test/e2e/apps/job.go:481 + STEP: Creating a job 08/24/23 13:17:43.844 + STEP: Ensuring active pods == parallelism 08/24/23 13:17:43.862 + STEP: delete a job 08/24/23 13:17:47.877 + STEP: deleting Job.batch foo in namespace job-6907, will wait for the garbage collector to delete the pods 08/24/23 13:17:47.877 + Aug 24 13:17:47.995: INFO: Deleting Job.batch foo took: 57.808718ms + Aug 24 13:17:48.196: INFO: Terminating Job.batch foo pods took: 201.192085ms + STEP: Ensuring job was deleted 08/24/23 13:18:19.997 + [AfterEach] [sig-apps] Job + test/e2e/framework/node/init/init.go:32 + Aug 24 13:18:20.004: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] Job + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-apps] Job + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-apps] Job + tear down framework | framework.go:193 + STEP: Destroying namespace "job-6907" for this suite. 
08/24/23 13:18:20.012 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSS ------------------------------ [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should mutate configmap [Conformance] - test/e2e/apimachinery/webhook.go:252 + should deny crd creation [Conformance] + test/e2e/apimachinery/webhook.go:308 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:54.64 -Jul 29 17:06:54.640: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 17:06:54.643 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:54.678 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:54.682 +STEP: Creating a kubernetes client 08/24/23 13:18:20.029 +Aug 24 13:18:20.029: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename webhook 08/24/23 13:18:20.031 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:18:20.057 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:18:20.066 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 17:06:54.71 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 17:06:55.416 -STEP: Deploying the webhook pod 07/29/23 17:06:55.433 -STEP: Wait for the deployment to be ready 07/29/23 17:06:55.457 -Jul 29 17:06:55.470: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -STEP: Deploying the webhook service 07/29/23 17:06:57.49 -STEP: Verifying the service has paired with the endpoint 07/29/23 17:06:57.509 -Jul 29 17:06:58.510: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should mutate configmap [Conformance] - test/e2e/apimachinery/webhook.go:252 -STEP: Registering the mutating configmap webhook via the AdmissionRegistration API 07/29/23 17:06:58.52 -STEP: create a configmap that should be updated by the webhook 07/29/23 17:06:58.548 +STEP: Setting up server cert 08/24/23 13:18:20.1 +STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 13:18:20.946 +STEP: Deploying the webhook pod 08/24/23 13:18:20.957 +STEP: Wait for the deployment to be ready 08/24/23 13:18:20.98 +Aug 24 13:18:21.002: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service 08/24/23 13:18:23.032 +STEP: Verifying the service has paired with the endpoint 08/24/23 13:18:23.062 +Aug 24 13:18:24.063: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should deny crd creation [Conformance] + test/e2e/apimachinery/webhook.go:308 +STEP: Registering the crd webhook via the AdmissionRegistration API 08/24/23 13:18:24.071 +STEP: Creating a custom resource definition that should be denied by the webhook 08/24/23 13:18:24.111 +Aug 24 13:18:24.111: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 -Jul 29 17:06:58.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 
13:18:24.147: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:105 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -36600,42 +36328,43 @@ Jul 29 17:06:58.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-1603" for this suite. 07/29/23 17:06:58.684 -STEP: Destroying namespace "webhook-1603-markers" for this suite. 07/29/23 17:06:58.698 +STEP: Destroying namespace "webhook-6724" for this suite. 08/24/23 13:18:24.274 +STEP: Destroying namespace "webhook-6724-markers" for this suite. 08/24/23 13:18:24.287 ------------------------------ -• [4.074 seconds] +• [4.279 seconds] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/framework.go:23 - should mutate configmap [Conformance] - test/e2e/apimachinery/webhook.go:252 + should deny crd creation [Conformance] + test/e2e/apimachinery/webhook.go:308 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:54.64 - Jul 29 17:06:54.640: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 17:06:54.643 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:54.678 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:54.682 + STEP: Creating a kubernetes client 08/24/23 13:18:20.029 + Aug 24 13:18:20.029: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename webhook 08/24/23 13:18:20.031 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:18:20.057 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:18:20.066 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 17:06:54.71 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 17:06:55.416 - STEP: Deploying the webhook pod 07/29/23 17:06:55.433 - STEP: Wait for the deployment to be ready 07/29/23 17:06:55.457 - Jul 29 17:06:55.470: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set - STEP: Deploying the webhook service 07/29/23 17:06:57.49 - STEP: Verifying the service has paired with the endpoint 07/29/23 17:06:57.509 - Jul 29 17:06:58.510: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should mutate configmap [Conformance] - test/e2e/apimachinery/webhook.go:252 - STEP: Registering the mutating configmap webhook via the AdmissionRegistration API 07/29/23 17:06:58.52 - STEP: create a configmap that should be updated by the webhook 07/29/23 17:06:58.548 + STEP: Setting up server cert 08/24/23 13:18:20.1 + STEP: Create role binding to let webhook read extension-apiserver-authentication 08/24/23 13:18:20.946 + STEP: Deploying the webhook pod 08/24/23 13:18:20.957 + STEP: Wait for the deployment to be ready 08/24/23 
13:18:20.98 + Aug 24 13:18:21.002: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set + STEP: Deploying the webhook service 08/24/23 13:18:23.032 + STEP: Verifying the service has paired with the endpoint 08/24/23 13:18:23.062 + Aug 24 13:18:24.063: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 + [It] should deny crd creation [Conformance] + test/e2e/apimachinery/webhook.go:308 + STEP: Registering the crd webhook via the AdmissionRegistration API 08/24/23 13:18:24.071 + STEP: Creating a custom resource definition that should be denied by the webhook 08/24/23 13:18:24.111 + Aug 24 13:18:24.111: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/framework/node/init/init.go:32 - Jul 29 17:06:58.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 13:18:24.147: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] test/e2e/apimachinery/webhook.go:105 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] @@ -36644,1323 +36373,1506 @@ test/e2e/apimachinery/framework.go:23 dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-1603" for this suite. 07/29/23 17:06:58.684 - STEP: Destroying namespace "webhook-1603-markers" for this suite. 07/29/23 17:06:58.698 + STEP: Destroying namespace "webhook-6724" for this suite. 08/24/23 13:18:24.274 + STEP: Destroying namespace "webhook-6724-markers" for this suite. 08/24/23 13:18:24.287 << End Captured GinkgoWriter Output ------------------------------ -SSSS +SSSSSSSSS ------------------------------ -[sig-storage] Projected downwardAPI - should update annotations on modification [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:162 -[BeforeEach] [sig-storage] Projected downwardAPI +[sig-apps] CronJob + should replace jobs when ReplaceConcurrent [Conformance] + test/e2e/apps/cronjob.go:160 +[BeforeEach] [sig-apps] CronJob set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:06:58.72 -Jul 29 17:06:58.720: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 17:06:58.728 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:58.769 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:58.775 -[BeforeEach] [sig-storage] Projected downwardAPI +STEP: Creating a kubernetes client 08/24/23 13:18:24.314 +Aug 24 13:18:24.314: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename cronjob 08/24/23 13:18:24.316 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:18:24.356 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:18:24.383 +[BeforeEach] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 -[It] should update annotations on modification [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:162 -STEP: Creating the pod 07/29/23 17:06:58.781 -Jul 29 17:06:58.805: INFO: Waiting up to 5m0s for pod 
"annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3" in namespace "projected-9231" to be "running and ready" -Jul 29 17:06:58.814: INFO: Pod "annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3": Phase="Pending", Reason="", readiness=false. Elapsed: 8.803831ms -Jul 29 17:06:58.814: INFO: The phase of Pod annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3 is Pending, waiting for it to be Running (with Ready = true) -Jul 29 17:07:00.820: INFO: Pod "annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3": Phase="Running", Reason="", readiness=true. Elapsed: 2.015765642s -Jul 29 17:07:00.821: INFO: The phase of Pod annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3 is Running (Ready = true) -Jul 29 17:07:00.821: INFO: Pod "annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3" satisfied condition "running and ready" -Jul 29 17:07:01.382: INFO: Successfully updated pod "annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3" -[AfterEach] [sig-storage] Projected downwardAPI +[It] should replace jobs when ReplaceConcurrent [Conformance] + test/e2e/apps/cronjob.go:160 +STEP: Creating a ReplaceConcurrent cronjob 08/24/23 13:18:24.395 +STEP: Ensuring a job is scheduled 08/24/23 13:18:24.409 +STEP: Ensuring exactly one is scheduled 08/24/23 13:19:00.417 +STEP: Ensuring exactly one running job exists by listing jobs explicitly 08/24/23 13:19:00.422 +STEP: Ensuring the job is replaced with a new one 08/24/23 13:19:00.428 +STEP: Removing cronjob 08/24/23 13:20:00.436 +[AfterEach] [sig-apps] CronJob test/e2e/framework/node/init/init.go:32 -Jul 29 17:07:03.437: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +Aug 24 13:20:00.457: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-apps] CronJob test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-apps] CronJob dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected downwardAPI +[DeferCleanup (Each)] [sig-apps] CronJob tear down framework | framework.go:193 -STEP: Destroying namespace "projected-9231" for this suite. 07/29/23 17:07:03.447 +STEP: Destroying namespace "cronjob-1717" for this suite. 
08/24/23 13:20:00.471 ------------------------------ -• [4.737 seconds] -[sig-storage] Projected downwardAPI +• [SLOW TEST] [96.176 seconds] +[sig-apps] CronJob +test/e2e/apps/framework.go:23 + should replace jobs when ReplaceConcurrent [Conformance] + test/e2e/apps/cronjob.go:160 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-apps] CronJob + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 13:18:24.314 + Aug 24 13:18:24.314: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename cronjob 08/24/23 13:18:24.316 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:18:24.356 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:18:24.383 + [BeforeEach] [sig-apps] CronJob + test/e2e/framework/metrics/init/init.go:31 + [It] should replace jobs when ReplaceConcurrent [Conformance] + test/e2e/apps/cronjob.go:160 + STEP: Creating a ReplaceConcurrent cronjob 08/24/23 13:18:24.395 + STEP: Ensuring a job is scheduled 08/24/23 13:18:24.409 + STEP: Ensuring exactly one is scheduled 08/24/23 13:19:00.417 + STEP: Ensuring exactly one running job exists by listing jobs explicitly 08/24/23 13:19:00.422 + STEP: Ensuring the job is replaced with a new one 08/24/23 13:19:00.428 + STEP: Removing cronjob 08/24/23 13:20:00.436 + [AfterEach] [sig-apps] CronJob + test/e2e/framework/node/init/init.go:32 + Aug 24 13:20:00.457: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-apps] CronJob + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-apps] CronJob + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-apps] CronJob + tear down framework | framework.go:193 + STEP: Destroying namespace "cronjob-1717" for this suite. 08/24/23 13:20:00.471 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:125 +[BeforeEach] [sig-storage] Secrets + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 13:20:00.503 +Aug 24 13:20:00.503: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename secrets 08/24/23 13:20:00.506 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:00.547 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:00.55 +[BeforeEach] [sig-storage] Secrets + test/e2e/framework/metrics/init/init.go:31 +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:125 +STEP: Creating secret with name secret-test-1ace88eb-68ec-49b1-b267-a1e874d79e4f 08/24/23 13:20:00.557 +STEP: Creating a pod to test consume secrets 08/24/23 13:20:00.569 +Aug 24 13:20:00.588: INFO: Waiting up to 5m0s for pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3" in namespace "secrets-4604" to be "Succeeded or Failed" +Aug 24 13:20:00.598: INFO: Pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3": Phase="Pending", Reason="", readiness=false. Elapsed: 10.073448ms +Aug 24 13:20:02.610: INFO: Pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.021799747s +Aug 24 13:20:04.607: INFO: Pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018796888s +STEP: Saw pod success 08/24/23 13:20:04.607 +Aug 24 13:20:04.607: INFO: Pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3" satisfied condition "Succeeded or Failed" +Aug 24 13:20:04.616: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3 container secret-volume-test: +STEP: delete the pod 08/24/23 13:20:04.651 +Aug 24 13:20:04.675: INFO: Waiting for pod pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3 to disappear +Aug 24 13:20:04.681: INFO: Pod pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3 no longer exists +[AfterEach] [sig-storage] Secrets + test/e2e/framework/node/init/init.go:32 +Aug 24 13:20:04.682: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Secrets + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-storage] Secrets + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-storage] Secrets + tear down framework | framework.go:193 +STEP: Destroying namespace "secrets-4604" for this suite. 08/24/23 13:20:04.693 +------------------------------ +• [4.206 seconds] +[sig-storage] Secrets test/e2e/common/storage/framework.go:23 - should update annotations on modification [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:162 + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:125 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected downwardAPI + [BeforeEach] [sig-storage] Secrets set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:06:58.72 - Jul 29 17:06:58.720: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 17:06:58.728 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:06:58.769 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:06:58.775 - [BeforeEach] [sig-storage] Projected downwardAPI + STEP: Creating a kubernetes client 08/24/23 13:20:00.503 + Aug 24 13:20:00.503: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename secrets 08/24/23 13:20:00.506 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:00.547 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:00.55 + [BeforeEach] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-storage] Projected downwardAPI - test/e2e/common/storage/projected_downwardapi.go:44 - [It] should update annotations on modification [NodeConformance] [Conformance] - test/e2e/common/storage/projected_downwardapi.go:162 - STEP: Creating the pod 07/29/23 17:06:58.781 - Jul 29 17:06:58.805: INFO: Waiting up to 5m0s for pod "annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3" in namespace "projected-9231" to be "running and ready" - Jul 29 17:06:58.814: INFO: Pod "annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.803831ms - Jul 29 17:06:58.814: INFO: The phase of Pod annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3 is Pending, waiting for it to be Running (with Ready = true) - Jul 29 17:07:00.820: INFO: Pod "annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3": Phase="Running", Reason="", readiness=true. Elapsed: 2.015765642s - Jul 29 17:07:00.821: INFO: The phase of Pod annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3 is Running (Ready = true) - Jul 29 17:07:00.821: INFO: Pod "annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3" satisfied condition "running and ready" - Jul 29 17:07:01.382: INFO: Successfully updated pod "annotationupdate20a38e32-9a35-4603-9d96-e620e37ec1a3" - [AfterEach] [sig-storage] Projected downwardAPI + [It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + test/e2e/common/storage/secrets_volume.go:125 + STEP: Creating secret with name secret-test-1ace88eb-68ec-49b1-b267-a1e874d79e4f 08/24/23 13:20:00.557 + STEP: Creating a pod to test consume secrets 08/24/23 13:20:00.569 + Aug 24 13:20:00.588: INFO: Waiting up to 5m0s for pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3" in namespace "secrets-4604" to be "Succeeded or Failed" + Aug 24 13:20:00.598: INFO: Pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3": Phase="Pending", Reason="", readiness=false. Elapsed: 10.073448ms + Aug 24 13:20:02.610: INFO: Pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021799747s + Aug 24 13:20:04.607: INFO: Pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018796888s + STEP: Saw pod success 08/24/23 13:20:04.607 + Aug 24 13:20:04.607: INFO: Pod "pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3" satisfied condition "Succeeded or Failed" + Aug 24 13:20:04.616: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3 container secret-volume-test: + STEP: delete the pod 08/24/23 13:20:04.651 + Aug 24 13:20:04.675: INFO: Waiting for pod pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3 to disappear + Aug 24 13:20:04.681: INFO: Pod pod-secrets-a57a5d16-ae2c-417a-a676-e25f8a9d55c3 no longer exists + [AfterEach] [sig-storage] Secrets test/e2e/framework/node/init/init.go:32 - Jul 29 17:07:03.437: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + Aug 24 13:20:04.682: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Secrets test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-storage] Secrets dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected downwardAPI + [DeferCleanup (Each)] [sig-storage] Secrets tear down framework | framework.go:193 - STEP: Destroying namespace "projected-9231" for this suite. 07/29/23 17:07:03.447 + STEP: Destroying namespace "secrets-4604" for this suite. 
08/24/23 13:20:04.693 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSS +[sig-storage] CSIInlineVolumes + should support CSIVolumeSource in Pod API [Conformance] + test/e2e/storage/csi_inline.go:131 +[BeforeEach] [sig-storage] CSIInlineVolumes + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 13:20:04.712 +Aug 24 13:20:04.712: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename csiinlinevolumes 08/24/23 13:20:04.715 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:04.744 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:04.748 +[BeforeEach] [sig-storage] CSIInlineVolumes + test/e2e/framework/metrics/init/init.go:31 +[It] should support CSIVolumeSource in Pod API [Conformance] + test/e2e/storage/csi_inline.go:131 +STEP: creating 08/24/23 13:20:04.754 +STEP: getting 08/24/23 13:20:04.783 +STEP: listing in namespace 08/24/23 13:20:04.797 +STEP: patching 08/24/23 13:20:04.806 +STEP: deleting 08/24/23 13:20:04.839 +[AfterEach] [sig-storage] CSIInlineVolumes + test/e2e/framework/node/init/init.go:32 +Aug 24 13:20:04.858: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-storage] CSIInlineVolumes + tear down framework | framework.go:193 +STEP: Destroying namespace "csiinlinevolumes-6324" for this suite. 08/24/23 13:20:04.867 ------------------------------ -[sig-node] InitContainer [NodeConformance] - should not start app containers if init containers fail on a RestartAlways pod [Conformance] - test/e2e/common/node/init_container.go:334 -[BeforeEach] [sig-node] InitContainer [NodeConformance] +• [0.171 seconds] +[sig-storage] CSIInlineVolumes +test/e2e/storage/utils/framework.go:23 + should support CSIVolumeSource in Pod API [Conformance] + test/e2e/storage/csi_inline.go:131 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-storage] CSIInlineVolumes + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 13:20:04.712 + Aug 24 13:20:04.712: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename csiinlinevolumes 08/24/23 13:20:04.715 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:04.744 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:04.748 + [BeforeEach] [sig-storage] CSIInlineVolumes + test/e2e/framework/metrics/init/init.go:31 + [It] should support CSIVolumeSource in Pod API [Conformance] + test/e2e/storage/csi_inline.go:131 + STEP: creating 08/24/23 13:20:04.754 + STEP: getting 08/24/23 13:20:04.783 + STEP: listing in namespace 08/24/23 13:20:04.797 + STEP: patching 08/24/23 13:20:04.806 + STEP: deleting 08/24/23 13:20:04.839 + [AfterEach] [sig-storage] CSIInlineVolumes + test/e2e/framework/node/init/init.go:32 + Aug 24 13:20:04.858: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-storage] CSIInlineVolumes + tear down framework | framework.go:193 + STEP: Destroying namespace 
"csiinlinevolumes-6324" for this suite. 08/24/23 13:20:04.867 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide container's cpu limit [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:193 +[BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:07:03.468 -Jul 29 17:07:03.469: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename init-container 07/29/23 17:07:03.471 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:07:03.503 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:07:03.506 -[BeforeEach] [sig-node] InitContainer [NodeConformance] +STEP: Creating a kubernetes client 08/24/23 13:20:04.887 +Aug 24 13:20:04.887: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 13:20:04.889 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:04.915 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:04.92 +[BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-node] InitContainer [NodeConformance] - test/e2e/common/node/init_container.go:165 -[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] - test/e2e/common/node/init_container.go:334 -STEP: creating the pod 07/29/23 17:07:03.51 -Jul 29 17:07:03.510: INFO: PodSpec: initContainers in spec.initContainers -Jul 29 17:07:46.595: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-e70c7298-d157-47ae-b45a-7f739c8b0825", GenerateName:"", Namespace:"init-container-9219", SelfLink:"", UID:"a142b171-c981-430c-bfba-bbdfc96cd557", ResourceVersion:"40603", Generation:0, CreationTimestamp:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"510888492"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"e2e.test", Operation:"Update", APIVersion:"v1", Time:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc005ef4408), Subresource:""}, v1.ManagedFieldsEntry{Manager:"kubelet", Operation:"Update", APIVersion:"v1", Time:time.Date(2023, time.July, 29, 17, 7, 46, 0, time.Local), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc005ef4468), Subresource:"status"}}}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-api-access-wfvpj", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), 
CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(0xc000e6bb40), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil), Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-wfvpj", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil), Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-wfvpj", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"registry.k8s.io/pause:3.9", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-wfvpj", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, 
VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc006cff3d8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"wetuj3nuajog-3", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc003deacb0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc006cff460)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc006cff480)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc006cff488), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc006cff48c), PreemptionPolicy:(*v1.PreemptionPolicy)(0xc000d3a2c0), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil), OS:(*v1.PodOS)(nil), HostUsers:(*bool)(nil), SchedulingGates:[]v1.PodSchedulingGate(nil), ResourceClaims:[]v1.PodResourceClaim(nil)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"192.168.121.141", PodIP:"10.233.66.2", PodIPs:[]v1.PodIP{v1.PodIP{IP:"10.233.66.2"}}, StartTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc003dead90)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), 
Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc003deae00)}, Ready:false, RestartCount:3, Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", ImageID:"registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937", ContainerID:"cri-o://77d42d7e2b98c7c2672d5762091a7ca0b46bc278f948d84bd19b469bf60c1e72", Started:(*bool)(nil)}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc000e6bbc0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", ImageID:"", ContainerID:"", Started:(*bool)(nil)}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc000e6bba0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"registry.k8s.io/pause:3.9", ImageID:"", ContainerID:"", Started:(*bool)(0xc006cff50f)}}, QOSClass:"Burstable", EphemeralContainerStatuses:[]v1.ContainerStatus(nil)}} -[AfterEach] [sig-node] InitContainer [NodeConformance] +[BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 +[It] should provide container's cpu limit [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:193 +STEP: Creating a pod to test downward API volume plugin 08/24/23 13:20:04.964 +Aug 24 13:20:04.981: INFO: Waiting up to 5m0s for pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd" in namespace "downward-api-7803" to be "Succeeded or Failed" +Aug 24 13:20:04.992: INFO: Pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd": Phase="Pending", Reason="", readiness=false. Elapsed: 10.682ms +Aug 24 13:20:07.011: INFO: Pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.029497935s +Aug 24 13:20:09.002: INFO: Pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.020433363s +STEP: Saw pod success 08/24/23 13:20:09.002 +Aug 24 13:20:09.002: INFO: Pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd" satisfied condition "Succeeded or Failed" +Aug 24 13:20:09.010: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd container client-container: +STEP: delete the pod 08/24/23 13:20:09.022 +Aug 24 13:20:09.044: INFO: Waiting for pod downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd to disappear +Aug 24 13:20:09.048: INFO: Pod downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd no longer exists +[AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 -Jul 29 17:07:46.598: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +Aug 24 13:20:09.048: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +[DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] +[DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 -STEP: Destroying namespace "init-container-9219" for this suite. 07/29/23 17:07:46.607 +STEP: Destroying namespace "downward-api-7803" for this suite. 08/24/23 13:20:09.057 ------------------------------ -• [SLOW TEST] [43.150 seconds] -[sig-node] InitContainer [NodeConformance] -test/e2e/common/node/framework.go:23 - should not start app containers if init containers fail on a RestartAlways pod [Conformance] - test/e2e/common/node/init_container.go:334 +• [4.198 seconds] +[sig-storage] Downward API volume +test/e2e/common/storage/framework.go:23 + should provide container's cpu limit [NodeConformance] [Conformance] + test/e2e/common/storage/downwardapi_volume.go:193 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] InitContainer [NodeConformance] + [BeforeEach] [sig-storage] Downward API volume set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:07:03.468 - Jul 29 17:07:03.469: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename init-container 07/29/23 17:07:03.471 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:07:03.503 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:07:03.506 - [BeforeEach] [sig-node] InitContainer [NodeConformance] + STEP: Creating a kubernetes client 08/24/23 13:20:04.887 + Aug 24 13:20:04.887: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 13:20:04.889 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:04.915 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:04.92 + [BeforeEach] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-node] InitContainer [NodeConformance] - test/e2e/common/node/init_container.go:165 - [It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] - test/e2e/common/node/init_container.go:334 - STEP: creating the pod 07/29/23 17:07:03.51 - Jul 29 17:07:03.510: INFO: PodSpec: initContainers in spec.initContainers - Jul 29 
17:07:46.595: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-e70c7298-d157-47ae-b45a-7f739c8b0825", GenerateName:"", Namespace:"init-container-9219", SelfLink:"", UID:"a142b171-c981-430c-bfba-bbdfc96cd557", ResourceVersion:"40603", Generation:0, CreationTimestamp:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"510888492"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"e2e.test", Operation:"Update", APIVersion:"v1", Time:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc005ef4408), Subresource:""}, v1.ManagedFieldsEntry{Manager:"kubelet", Operation:"Update", APIVersion:"v1", Time:time.Date(2023, time.July, 29, 17, 7, 46, 0, time.Local), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc005ef4468), Subresource:"status"}}}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-api-access-wfvpj", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(0xc000e6bb40), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil), Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-wfvpj", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", 
Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil), Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-wfvpj", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"registry.k8s.io/pause:3.9", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Claims:[]v1.ResourceClaim(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-wfvpj", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc006cff3d8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"wetuj3nuajog-3", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc003deacb0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc006cff460)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc006cff480)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc006cff488), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc006cff48c), PreemptionPolicy:(*v1.PreemptionPolicy)(0xc000d3a2c0), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil), 
OS:(*v1.PodOS)(nil), HostUsers:(*bool)(nil), SchedulingGates:[]v1.PodSchedulingGate(nil), ResourceClaims:[]v1.PodResourceClaim(nil)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"192.168.121.141", PodIP:"10.233.66.2", PodIPs:[]v1.PodIP{v1.PodIP{IP:"10.233.66.2"}}, StartTime:time.Date(2023, time.July, 29, 17, 7, 3, 0, time.Local), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc003dead90)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc003deae00)}, Ready:false, RestartCount:3, Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", ImageID:"registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937", ContainerID:"cri-o://77d42d7e2b98c7c2672d5762091a7ca0b46bc278f948d84bd19b469bf60c1e72", Started:(*bool)(nil)}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc000e6bbc0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"registry.k8s.io/e2e-test-images/busybox:1.29-4", ImageID:"", ContainerID:"", Started:(*bool)(nil)}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc000e6bba0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"registry.k8s.io/pause:3.9", ImageID:"", ContainerID:"", Started:(*bool)(0xc006cff50f)}}, QOSClass:"Burstable", EphemeralContainerStatuses:[]v1.ContainerStatus(nil)}} - [AfterEach] [sig-node] InitContainer [NodeConformance] + [BeforeEach] [sig-storage] Downward API volume + test/e2e/common/storage/downwardapi_volume.go:44 + [It] should provide container's cpu limit [NodeConformance] 
[Conformance] + test/e2e/common/storage/downwardapi_volume.go:193 + STEP: Creating a pod to test downward API volume plugin 08/24/23 13:20:04.964 + Aug 24 13:20:04.981: INFO: Waiting up to 5m0s for pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd" in namespace "downward-api-7803" to be "Succeeded or Failed" + Aug 24 13:20:04.992: INFO: Pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd": Phase="Pending", Reason="", readiness=false. Elapsed: 10.682ms + Aug 24 13:20:07.011: INFO: Pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.029497935s + Aug 24 13:20:09.002: INFO: Pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020433363s + STEP: Saw pod success 08/24/23 13:20:09.002 + Aug 24 13:20:09.002: INFO: Pod "downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd" satisfied condition "Succeeded or Failed" + Aug 24 13:20:09.010: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd container client-container: + STEP: delete the pod 08/24/23 13:20:09.022 + Aug 24 13:20:09.044: INFO: Waiting for pod downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd to disappear + Aug 24 13:20:09.048: INFO: Pod downwardapi-volume-14839d71-e505-4ab6-ad1a-87bc8e06debd no longer exists + [AfterEach] [sig-storage] Downward API volume test/e2e/framework/node/init/init.go:32 - Jul 29 17:07:46.598: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + Aug 24 13:20:09.048: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Downward API volume test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + [DeferCleanup (Each)] [sig-storage] Downward API volume dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] InitContainer [NodeConformance] + [DeferCleanup (Each)] [sig-storage] Downward API volume tear down framework | framework.go:193 - STEP: Destroying namespace "init-container-9219" for this suite. 07/29/23 17:07:46.607 + STEP: Destroying namespace "downward-api-7803" for this suite. 
08/24/23 13:20:09.057 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSS ------------------------------ -[sig-node] Downward API - should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:217 -[BeforeEach] [sig-node] Downward API +[sig-storage] Projected downwardAPI + should provide container's memory limit [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:207 +[BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:07:46.631 -Jul 29 17:07:46.631: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 17:07:46.634 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:07:46.671 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:07:46.676 -[BeforeEach] [sig-node] Downward API +STEP: Creating a kubernetes client 08/24/23 13:20:09.091 +Aug 24 13:20:09.092: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 13:20:09.094 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:09.118 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:09.122 +[BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 -[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:217 -STEP: Creating a pod to test downward api env vars 07/29/23 17:07:46.682 -Jul 29 17:07:46.698: INFO: Waiting up to 5m0s for pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d" in namespace "downward-api-6397" to be "Succeeded or Failed" -Jul 29 17:07:46.704: INFO: Pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.949477ms -Jul 29 17:07:48.714: INFO: Pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015208988s -Jul 29 17:07:50.716: INFO: Pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018031795s -STEP: Saw pod success 07/29/23 17:07:50.717 -Jul 29 17:07:50.717: INFO: Pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d" satisfied condition "Succeeded or Failed" -Jul 29 17:07:50.722: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d container dapi-container: -STEP: delete the pod 07/29/23 17:07:50.74 -Jul 29 17:07:50.762: INFO: Waiting for pod downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d to disappear -Jul 29 17:07:50.767: INFO: Pod downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d no longer exists -[AfterEach] [sig-node] Downward API +[BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 +[It] should provide container's memory limit [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:207 +STEP: Creating a pod to test downward API volume plugin 08/24/23 13:20:09.128 +Aug 24 13:20:09.141: INFO: Waiting up to 5m0s for pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d" in namespace "projected-2801" to be "Succeeded or Failed" +Aug 24 13:20:09.147: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.73949ms +Aug 24 13:20:11.156: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d": Phase="Running", Reason="", readiness=true. Elapsed: 2.015155227s +Aug 24 13:20:13.156: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d": Phase="Running", Reason="", readiness=false. Elapsed: 4.014571663s +Aug 24 13:20:15.155: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.013577579s +STEP: Saw pod success 08/24/23 13:20:15.155 +Aug 24 13:20:15.155: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d" satisfied condition "Succeeded or Failed" +Aug 24 13:20:15.162: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d container client-container: +STEP: delete the pod 08/24/23 13:20:15.175 +Aug 24 13:20:15.195: INFO: Waiting for pod downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d to disappear +Aug 24 13:20:15.200: INFO: Pod downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d no longer exists +[AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 -Jul 29 17:07:50.767: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Downward API +Aug 24 13:20:15.201: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-6397" for this suite. 07/29/23 17:07:50.777 +STEP: Destroying namespace "projected-2801" for this suite. 
08/24/23 13:20:15.213 ------------------------------ -• [4.155 seconds] -[sig-node] Downward API -test/e2e/common/node/framework.go:23 - should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:217 +• [SLOW TEST] [6.132 seconds] +[sig-storage] Projected downwardAPI +test/e2e/common/storage/framework.go:23 + should provide container's memory limit [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:207 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Downward API + [BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:07:46.631 - Jul 29 17:07:46.631: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 17:07:46.634 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:07:46.671 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:07:46.676 - [BeforeEach] [sig-node] Downward API + STEP: Creating a kubernetes client 08/24/23 13:20:09.091 + Aug 24 13:20:09.092: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 13:20:09.094 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:09.118 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:09.122 + [BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 - [It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:217 - STEP: Creating a pod to test downward api env vars 07/29/23 17:07:46.682 - Jul 29 17:07:46.698: INFO: Waiting up to 5m0s for pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d" in namespace "downward-api-6397" to be "Succeeded or Failed" - Jul 29 17:07:46.704: INFO: Pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.949477ms - Jul 29 17:07:48.714: INFO: Pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015208988s - Jul 29 17:07:50.716: INFO: Pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018031795s - STEP: Saw pod success 07/29/23 17:07:50.717 - Jul 29 17:07:50.717: INFO: Pod "downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d" satisfied condition "Succeeded or Failed" - Jul 29 17:07:50.722: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d container dapi-container: - STEP: delete the pod 07/29/23 17:07:50.74 - Jul 29 17:07:50.762: INFO: Waiting for pod downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d to disappear - Jul 29 17:07:50.767: INFO: Pod downward-api-dcab516a-863b-47b5-8823-ac4aafcf276d no longer exists - [AfterEach] [sig-node] Downward API + [BeforeEach] [sig-storage] Projected downwardAPI + test/e2e/common/storage/projected_downwardapi.go:44 + [It] should provide container's memory limit [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:207 + STEP: Creating a pod to test downward API volume plugin 08/24/23 13:20:09.128 + Aug 24 13:20:09.141: INFO: Waiting up to 5m0s for pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d" in namespace "projected-2801" to be "Succeeded or Failed" + Aug 24 13:20:09.147: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.73949ms + Aug 24 13:20:11.156: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d": Phase="Running", Reason="", readiness=true. Elapsed: 2.015155227s + Aug 24 13:20:13.156: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d": Phase="Running", Reason="", readiness=false. Elapsed: 4.014571663s + Aug 24 13:20:15.155: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.013577579s + STEP: Saw pod success 08/24/23 13:20:15.155 + Aug 24 13:20:15.155: INFO: Pod "downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d" satisfied condition "Succeeded or Failed" + Aug 24 13:20:15.162: INFO: Trying to get logs from node pe9deep4seen-3 pod downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d container client-container: + STEP: delete the pod 08/24/23 13:20:15.175 + Aug 24 13:20:15.195: INFO: Waiting for pod downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d to disappear + Aug 24 13:20:15.200: INFO: Pod downwardapi-volume-297e6edf-252d-4d19-b9e3-044364e1412d no longer exists + [AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 - Jul 29 17:07:50.767: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Downward API + Aug 24 13:20:15.201: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-6397" for this suite. 07/29/23 17:07:50.777 + STEP: Destroying namespace "projected-2801" for this suite. 
08/24/23 13:20:15.213 << End Captured GinkgoWriter Output ------------------------------ -S ------------------------------- [sig-apps] Job - should create pods for an Indexed job with completion indexes and specified hostname [Conformance] - test/e2e/apps/job.go:366 + should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] + test/e2e/apps/job.go:426 [BeforeEach] [sig-apps] Job set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:07:50.786 -Jul 29 17:07:50.786: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename job 07/29/23 17:07:50.788 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:07:50.816 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:07:50.819 +STEP: Creating a kubernetes client 08/24/23 13:20:15.225 +Aug 24 13:20:15.226: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename job 08/24/23 13:20:15.228 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:15.258 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:15.261 [BeforeEach] [sig-apps] Job test/e2e/framework/metrics/init/init.go:31 -[It] should create pods for an Indexed job with completion indexes and specified hostname [Conformance] - test/e2e/apps/job.go:366 -STEP: Creating Indexed job 07/29/23 17:07:50.823 -STEP: Ensuring job reaches completions 07/29/23 17:07:50.83 -STEP: Ensuring pods with index for job exist 07/29/23 17:08:00.837 +[It] should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] + test/e2e/apps/job.go:426 +STEP: Creating a job 08/24/23 13:20:15.266 +STEP: Ensuring job reaches completions 08/24/23 13:20:15.277 [AfterEach] [sig-apps] Job test/e2e/framework/node/init/init.go:32 -Jul 29 17:08:00.845: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +Aug 24 13:20:29.285: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] Job test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] Job dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-apps] Job tear down framework | framework.go:193 -STEP: Destroying namespace "job-5518" for this suite. 07/29/23 17:08:00.852 +STEP: Destroying namespace "job-8399" for this suite. 
08/24/23 13:20:29.299 ------------------------------ -• [SLOW TEST] [10.076 seconds] +• [SLOW TEST] [14.087 seconds] [sig-apps] Job test/e2e/apps/framework.go:23 - should create pods for an Indexed job with completion indexes and specified hostname [Conformance] - test/e2e/apps/job.go:366 + should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] + test/e2e/apps/job.go:426 Begin Captured GinkgoWriter Output >> [BeforeEach] [sig-apps] Job set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:07:50.786 - Jul 29 17:07:50.786: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename job 07/29/23 17:07:50.788 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:07:50.816 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:07:50.819 + STEP: Creating a kubernetes client 08/24/23 13:20:15.225 + Aug 24 13:20:15.226: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename job 08/24/23 13:20:15.228 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:15.258 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:15.261 [BeforeEach] [sig-apps] Job test/e2e/framework/metrics/init/init.go:31 - [It] should create pods for an Indexed job with completion indexes and specified hostname [Conformance] - test/e2e/apps/job.go:366 - STEP: Creating Indexed job 07/29/23 17:07:50.823 - STEP: Ensuring job reaches completions 07/29/23 17:07:50.83 - STEP: Ensuring pods with index for job exist 07/29/23 17:08:00.837 + [It] should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] + test/e2e/apps/job.go:426 + STEP: Creating a job 08/24/23 13:20:15.266 + STEP: Ensuring job reaches completions 08/24/23 13:20:15.277 [AfterEach] [sig-apps] Job test/e2e/framework/node/init/init.go:32 - Jul 29 17:08:00.845: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + Aug 24 13:20:29.285: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] Job test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] Job dump namespaces | framework.go:196 [DeferCleanup (Each)] [sig-apps] Job tear down framework | framework.go:193 - STEP: Destroying namespace "job-5518" for this suite. 07/29/23 17:08:00.852 + STEP: Destroying namespace "job-8399" for this suite. 
08/24/23 13:20:29.299 << End Captured GinkgoWriter Output ------------------------------ -[sig-apps] Job - should delete a job [Conformance] - test/e2e/apps/job.go:481 -[BeforeEach] [sig-apps] Job +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Pods + should support retrieving logs from the container over websockets [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:618 +[BeforeEach] [sig-node] Pods set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:08:00.866 -Jul 29 17:08:00.867: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename job 07/29/23 17:08:00.871 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:00.906 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:00.91 -[BeforeEach] [sig-apps] Job +STEP: Creating a kubernetes client 08/24/23 13:20:29.317 +Aug 24 13:20:29.318: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename pods 08/24/23 13:20:29.32 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:29.351 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:29.355 +[BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 -[It] should delete a job [Conformance] - test/e2e/apps/job.go:481 -STEP: Creating a job 07/29/23 17:08:00.916 -STEP: Ensuring active pods == parallelism 07/29/23 17:08:00.923 -STEP: delete a job 07/29/23 17:08:02.938 -STEP: deleting Job.batch foo in namespace job-1088, will wait for the garbage collector to delete the pods 07/29/23 17:08:02.939 -Jul 29 17:08:03.008: INFO: Deleting Job.batch foo took: 11.209694ms -Jul 29 17:08:03.109: INFO: Terminating Job.batch foo pods took: 101.04969ms -STEP: Ensuring job was deleted 07/29/23 17:08:36.01 -[AfterEach] [sig-apps] Job +[BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 +[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:618 +Aug 24 13:20:29.360: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: creating the pod 08/24/23 13:20:29.363 +STEP: submitting the pod to kubernetes 08/24/23 13:20:29.364 +Aug 24 13:20:29.382: INFO: Waiting up to 5m0s for pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832" in namespace "pods-6461" to be "running and ready" +Aug 24 13:20:29.389: INFO: Pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832": Phase="Pending", Reason="", readiness=false. Elapsed: 7.07719ms +Aug 24 13:20:29.389: INFO: The phase of Pod pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 13:20:31.447: INFO: Pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832": Phase="Pending", Reason="", readiness=false. Elapsed: 2.065690012s +Aug 24 13:20:31.448: INFO: The phase of Pod pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832 is Pending, waiting for it to be Running (with Ready = true) +Aug 24 13:20:33.399: INFO: Pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832": Phase="Running", Reason="", readiness=true. 
Elapsed: 4.017222199s +Aug 24 13:20:33.399: INFO: The phase of Pod pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832 is Running (Ready = true) +Aug 24 13:20:33.399: INFO: Pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832" satisfied condition "running and ready" +[AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 -Jul 29 17:08:36.018: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] Job +Aug 24 13:20:33.447: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] Job +[DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] Job +[DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 -STEP: Destroying namespace "job-1088" for this suite. 07/29/23 17:08:36.027 +STEP: Destroying namespace "pods-6461" for this suite. 08/24/23 13:20:33.461 ------------------------------ -• [SLOW TEST] [35.172 seconds] -[sig-apps] Job -test/e2e/apps/framework.go:23 - should delete a job [Conformance] - test/e2e/apps/job.go:481 +• [4.155 seconds] +[sig-node] Pods +test/e2e/common/node/framework.go:23 + should support retrieving logs from the container over websockets [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:618 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] Job + [BeforeEach] [sig-node] Pods set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:08:00.866 - Jul 29 17:08:00.867: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename job 07/29/23 17:08:00.871 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:00.906 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:00.91 - [BeforeEach] [sig-apps] Job + STEP: Creating a kubernetes client 08/24/23 13:20:29.317 + Aug 24 13:20:29.318: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename pods 08/24/23 13:20:29.32 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:29.351 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:29.355 + [BeforeEach] [sig-node] Pods test/e2e/framework/metrics/init/init.go:31 - [It] should delete a job [Conformance] - test/e2e/apps/job.go:481 - STEP: Creating a job 07/29/23 17:08:00.916 - STEP: Ensuring active pods == parallelism 07/29/23 17:08:00.923 - STEP: delete a job 07/29/23 17:08:02.938 - STEP: deleting Job.batch foo in namespace job-1088, will wait for the garbage collector to delete the pods 07/29/23 17:08:02.939 - Jul 29 17:08:03.008: INFO: Deleting Job.batch foo took: 11.209694ms - Jul 29 17:08:03.109: INFO: Terminating Job.batch foo pods took: 101.04969ms - STEP: Ensuring job was deleted 07/29/23 17:08:36.01 - [AfterEach] [sig-apps] Job + [BeforeEach] [sig-node] Pods + test/e2e/common/node/pods.go:194 + [It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance] + test/e2e/common/node/pods.go:618 + Aug 24 13:20:29.360: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: creating the pod 08/24/23 13:20:29.363 + STEP: submitting the pod to kubernetes 08/24/23 13:20:29.364 + Aug 24 13:20:29.382: INFO: Waiting up to 5m0s for pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832" in namespace "pods-6461" to be "running and 
ready" + Aug 24 13:20:29.389: INFO: Pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832": Phase="Pending", Reason="", readiness=false. Elapsed: 7.07719ms + Aug 24 13:20:29.389: INFO: The phase of Pod pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 13:20:31.447: INFO: Pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832": Phase="Pending", Reason="", readiness=false. Elapsed: 2.065690012s + Aug 24 13:20:31.448: INFO: The phase of Pod pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832 is Pending, waiting for it to be Running (with Ready = true) + Aug 24 13:20:33.399: INFO: Pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832": Phase="Running", Reason="", readiness=true. Elapsed: 4.017222199s + Aug 24 13:20:33.399: INFO: The phase of Pod pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832 is Running (Ready = true) + Aug 24 13:20:33.399: INFO: Pod "pod-logs-websocket-815ee418-c888-4e63-b09a-90dd61201832" satisfied condition "running and ready" + [AfterEach] [sig-node] Pods test/e2e/framework/node/init/init.go:32 - Jul 29 17:08:36.018: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] Job + Aug 24 13:20:33.447: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Pods test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] Job + [DeferCleanup (Each)] [sig-node] Pods dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] Job + [DeferCleanup (Each)] [sig-node] Pods tear down framework | framework.go:193 - STEP: Destroying namespace "job-1088" for this suite. 07/29/23 17:08:36.027 + STEP: Destroying namespace "pods-6461" for this suite. 
08/24/23 13:20:33.461 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSS ------------------------------ -[sig-apps] DisruptionController - should update/patch PodDisruptionBudget status [Conformance] - test/e2e/apps/disruption.go:164 -[BeforeEach] [sig-apps] DisruptionController +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:109 +[BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:08:36.049 -Jul 29 17:08:36.050: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename disruption 07/29/23 17:08:36.052 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:36.085 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:36.09 -[BeforeEach] [sig-apps] DisruptionController +STEP: Creating a kubernetes client 08/24/23 13:20:33.479 +Aug 24 13:20:33.479: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 13:20:33.481 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:33.507 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:33.512 +[BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] DisruptionController - test/e2e/apps/disruption.go:72 -[It] should update/patch PodDisruptionBudget status [Conformance] - test/e2e/apps/disruption.go:164 -STEP: Waiting for the pdb to be processed 07/29/23 17:08:36.105 -STEP: Updating PodDisruptionBudget status 07/29/23 17:08:38.121 -STEP: Waiting for all pods to be running 07/29/23 17:08:38.135 -Jul 29 17:08:38.140: INFO: running pods: 0 < 1 -STEP: locating a running pod 07/29/23 17:08:40.15 -STEP: Waiting for the pdb to be processed 07/29/23 17:08:40.172 -STEP: Patching PodDisruptionBudget status 07/29/23 17:08:40.195 -STEP: Waiting for the pdb to be processed 07/29/23 17:08:40.21 -[AfterEach] [sig-apps] DisruptionController +[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:109 +STEP: Creating configMap with name projected-configmap-test-volume-map-8a0c1a9f-d5cc-45de-a31b-b80d16c7c253 08/24/23 13:20:33.518 +STEP: Creating a pod to test consume configMaps 08/24/23 13:20:33.526 +Aug 24 13:20:33.548: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c" in namespace "projected-8062" to be "Succeeded or Failed" +Aug 24 13:20:33.563: INFO: Pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c": Phase="Pending", Reason="", readiness=false. Elapsed: 14.107368ms +Aug 24 13:20:35.574: INFO: Pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.025197834s +Aug 24 13:20:37.573: INFO: Pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.024250226s +STEP: Saw pod success 08/24/23 13:20:37.573 +Aug 24 13:20:37.573: INFO: Pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c" satisfied condition "Succeeded or Failed" +Aug 24 13:20:37.578: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c container agnhost-container: +STEP: delete the pod 08/24/23 13:20:37.59 +Aug 24 13:20:37.613: INFO: Waiting for pod pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c to disappear +Aug 24 13:20:37.618: INFO: Pod pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c no longer exists +[AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 -Jul 29 17:08:40.214: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] DisruptionController +Aug 24 13:20:37.618: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] DisruptionController +[DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] DisruptionController +[DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 -STEP: Destroying namespace "disruption-5873" for this suite. 07/29/23 17:08:40.224 +STEP: Destroying namespace "projected-8062" for this suite. 08/24/23 13:20:37.627 ------------------------------ -• [4.185 seconds] -[sig-apps] DisruptionController -test/e2e/apps/framework.go:23 - should update/patch PodDisruptionBudget status [Conformance] - test/e2e/apps/disruption.go:164 +• [4.160 seconds] +[sig-storage] Projected configMap +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:109 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] DisruptionController + [BeforeEach] [sig-storage] Projected configMap set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:08:36.049 - Jul 29 17:08:36.050: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename disruption 07/29/23 17:08:36.052 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:36.085 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:36.09 - [BeforeEach] [sig-apps] DisruptionController + STEP: Creating a kubernetes client 08/24/23 13:20:33.479 + Aug 24 13:20:33.479: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 13:20:33.481 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:33.507 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:33.512 + [BeforeEach] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] DisruptionController - test/e2e/apps/disruption.go:72 - [It] should update/patch PodDisruptionBudget status [Conformance] - test/e2e/apps/disruption.go:164 - STEP: Waiting for the pdb to be processed 07/29/23 17:08:36.105 - STEP: Updating PodDisruptionBudget status 07/29/23 17:08:38.121 - STEP: Waiting for all pods to be running 07/29/23 17:08:38.135 - Jul 29 17:08:38.140: INFO: running pods: 0 < 1 - STEP: locating a running pod 07/29/23 
17:08:40.15 - STEP: Waiting for the pdb to be processed 07/29/23 17:08:40.172 - STEP: Patching PodDisruptionBudget status 07/29/23 17:08:40.195 - STEP: Waiting for the pdb to be processed 07/29/23 17:08:40.21 - [AfterEach] [sig-apps] DisruptionController + [It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + test/e2e/common/storage/projected_configmap.go:109 + STEP: Creating configMap with name projected-configmap-test-volume-map-8a0c1a9f-d5cc-45de-a31b-b80d16c7c253 08/24/23 13:20:33.518 + STEP: Creating a pod to test consume configMaps 08/24/23 13:20:33.526 + Aug 24 13:20:33.548: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c" in namespace "projected-8062" to be "Succeeded or Failed" + Aug 24 13:20:33.563: INFO: Pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c": Phase="Pending", Reason="", readiness=false. Elapsed: 14.107368ms + Aug 24 13:20:35.574: INFO: Pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.025197834s + Aug 24 13:20:37.573: INFO: Pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024250226s + STEP: Saw pod success 08/24/23 13:20:37.573 + Aug 24 13:20:37.573: INFO: Pod "pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c" satisfied condition "Succeeded or Failed" + Aug 24 13:20:37.578: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c container agnhost-container: + STEP: delete the pod 08/24/23 13:20:37.59 + Aug 24 13:20:37.613: INFO: Waiting for pod pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c to disappear + Aug 24 13:20:37.618: INFO: Pod pod-projected-configmaps-e346f715-c600-4b2e-b2ae-337f0448368c no longer exists + [AfterEach] [sig-storage] Projected configMap test/e2e/framework/node/init/init.go:32 - Jul 29 17:08:40.214: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] DisruptionController + Aug 24 13:20:37.618: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected configMap test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] DisruptionController + [DeferCleanup (Each)] [sig-storage] Projected configMap dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] DisruptionController + [DeferCleanup (Each)] [sig-storage] Projected configMap tear down framework | framework.go:193 - STEP: Destroying namespace "disruption-5873" for this suite. 07/29/23 17:08:40.224 + STEP: Destroying namespace "projected-8062" for this suite. 
08/24/23 13:20:37.627 << End Captured GinkgoWriter Output ------------------------------ -[sig-apps] DisruptionController - should observe PodDisruptionBudget status updated [Conformance] - test/e2e/apps/disruption.go:141 -[BeforeEach] [sig-apps] DisruptionController +SSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Variable Expansion + should allow composing env vars into new env vars [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:44 +[BeforeEach] [sig-node] Variable Expansion set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:08:40.236 -Jul 29 17:08:40.236: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename disruption 07/29/23 17:08:40.238 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:40.283 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:40.288 -[BeforeEach] [sig-apps] DisruptionController +STEP: Creating a kubernetes client 08/24/23 13:20:37.641 +Aug 24 13:20:37.641: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename var-expansion 08/24/23 13:20:37.644 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:37.668 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:37.673 +[BeforeEach] [sig-node] Variable Expansion test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-apps] DisruptionController - test/e2e/apps/disruption.go:72 -[It] should observe PodDisruptionBudget status updated [Conformance] - test/e2e/apps/disruption.go:141 -STEP: Waiting for the pdb to be processed 07/29/23 17:08:40.3 -STEP: Waiting for all pods to be running 07/29/23 17:08:42.364 -Jul 29 17:08:42.373: INFO: running pods: 0 < 3 -[AfterEach] [sig-apps] DisruptionController +[It] should allow composing env vars into new env vars [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:44 +STEP: Creating a pod to test env composition 08/24/23 13:20:37.677 +Aug 24 13:20:37.689: INFO: Waiting up to 5m0s for pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4" in namespace "var-expansion-772" to be "Succeeded or Failed" +Aug 24 13:20:37.695: INFO: Pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4": Phase="Pending", Reason="", readiness=false. Elapsed: 5.485546ms +Aug 24 13:20:39.703: INFO: Pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01409494s +Aug 24 13:20:41.704: INFO: Pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014875568s +STEP: Saw pod success 08/24/23 13:20:41.704 +Aug 24 13:20:41.705: INFO: Pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4" satisfied condition "Succeeded or Failed" +Aug 24 13:20:41.714: INFO: Trying to get logs from node pe9deep4seen-3 pod var-expansion-c9180157-660c-4f40-847b-5984136de4b4 container dapi-container: +STEP: delete the pod 08/24/23 13:20:41.732 +Aug 24 13:20:41.760: INFO: Waiting for pod var-expansion-c9180157-660c-4f40-847b-5984136de4b4 to disappear +Aug 24 13:20:41.765: INFO: Pod var-expansion-c9180157-660c-4f40-847b-5984136de4b4 no longer exists +[AfterEach] [sig-node] Variable Expansion + test/e2e/framework/node/init/init.go:32 +Aug 24 13:20:41.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Variable Expansion + test/e2e/framework/metrics/init/init.go:33 +[DeferCleanup (Each)] [sig-node] Variable Expansion + dump namespaces | framework.go:196 +[DeferCleanup (Each)] [sig-node] Variable Expansion + tear down framework | framework.go:193 +STEP: Destroying namespace "var-expansion-772" for this suite. 08/24/23 13:20:41.775 +------------------------------ +• [4.148 seconds] +[sig-node] Variable Expansion +test/e2e/common/node/framework.go:23 + should allow composing env vars into new env vars [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:44 + + Begin Captured GinkgoWriter Output >> + [BeforeEach] [sig-node] Variable Expansion + set up framework | framework.go:178 + STEP: Creating a kubernetes client 08/24/23 13:20:37.641 + Aug 24 13:20:37.641: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename var-expansion 08/24/23 13:20:37.644 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:37.668 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:37.673 + [BeforeEach] [sig-node] Variable Expansion + test/e2e/framework/metrics/init/init.go:31 + [It] should allow composing env vars into new env vars [NodeConformance] [Conformance] + test/e2e/common/node/expansion.go:44 + STEP: Creating a pod to test env composition 08/24/23 13:20:37.677 + Aug 24 13:20:37.689: INFO: Waiting up to 5m0s for pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4" in namespace "var-expansion-772" to be "Succeeded or Failed" + Aug 24 13:20:37.695: INFO: Pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4": Phase="Pending", Reason="", readiness=false. Elapsed: 5.485546ms + Aug 24 13:20:39.703: INFO: Pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01409494s + Aug 24 13:20:41.704: INFO: Pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014875568s + STEP: Saw pod success 08/24/23 13:20:41.704 + Aug 24 13:20:41.705: INFO: Pod "var-expansion-c9180157-660c-4f40-847b-5984136de4b4" satisfied condition "Succeeded or Failed" + Aug 24 13:20:41.714: INFO: Trying to get logs from node pe9deep4seen-3 pod var-expansion-c9180157-660c-4f40-847b-5984136de4b4 container dapi-container: + STEP: delete the pod 08/24/23 13:20:41.732 + Aug 24 13:20:41.760: INFO: Waiting for pod var-expansion-c9180157-660c-4f40-847b-5984136de4b4 to disappear + Aug 24 13:20:41.765: INFO: Pod var-expansion-c9180157-660c-4f40-847b-5984136de4b4 no longer exists + [AfterEach] [sig-node] Variable Expansion + test/e2e/framework/node/init/init.go:32 + Aug 24 13:20:41.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Variable Expansion + test/e2e/framework/metrics/init/init.go:33 + [DeferCleanup (Each)] [sig-node] Variable Expansion + dump namespaces | framework.go:196 + [DeferCleanup (Each)] [sig-node] Variable Expansion + tear down framework | framework.go:193 + STEP: Destroying namespace "var-expansion-772" for this suite. 08/24/23 13:20:41.775 + << End Captured GinkgoWriter Output +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Kubelet when scheduling a read only busybox container + should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:184 +[BeforeEach] [sig-node] Kubelet + set up framework | framework.go:178 +STEP: Creating a kubernetes client 08/24/23 13:20:41.794 +Aug 24 13:20:41.794: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename kubelet-test 08/24/23 13:20:41.797 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:41.844 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:41.848 +[BeforeEach] [sig-node] Kubelet + test/e2e/framework/metrics/init/init.go:31 +[BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 +[It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:184 +Aug 24 13:20:41.865: INFO: Waiting up to 5m0s for pod "busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db" in namespace "kubelet-test-4584" to be "running and ready" +Aug 24 13:20:41.874: INFO: Pod "busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db": Phase="Pending", Reason="", readiness=false. Elapsed: 8.500996ms +Aug 24 13:20:41.874: INFO: The phase of Pod busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db is Pending, waiting for it to be Running (with Ready = true) +Aug 24 13:20:43.884: INFO: Pod "busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.019229791s +Aug 24 13:20:43.885: INFO: The phase of Pod busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db is Running (Ready = true) +Aug 24 13:20:43.885: INFO: Pod "busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db" satisfied condition "running and ready" +[AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 -Jul 29 17:08:44.390: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] DisruptionController +Aug 24 13:20:43.903: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] DisruptionController +[DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] DisruptionController +[DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 -STEP: Destroying namespace "disruption-7066" for this suite. 07/29/23 17:08:44.4 +STEP: Destroying namespace "kubelet-test-4584" for this suite. 08/24/23 13:20:43.914 ------------------------------ -• [4.176 seconds] -[sig-apps] DisruptionController -test/e2e/apps/framework.go:23 - should observe PodDisruptionBudget status updated [Conformance] - test/e2e/apps/disruption.go:141 +• [2.135 seconds] +[sig-node] Kubelet +test/e2e/common/node/framework.go:23 + when scheduling a read only busybox container + test/e2e/common/node/kubelet.go:175 + should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:184 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] DisruptionController + [BeforeEach] [sig-node] Kubelet set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:08:40.236 - Jul 29 17:08:40.236: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename disruption 07/29/23 17:08:40.238 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:40.283 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:40.288 - [BeforeEach] [sig-apps] DisruptionController + STEP: Creating a kubernetes client 08/24/23 13:20:41.794 + Aug 24 13:20:41.794: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename kubelet-test 08/24/23 13:20:41.797 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:41.844 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:41.848 + [BeforeEach] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-apps] DisruptionController - test/e2e/apps/disruption.go:72 - [It] should observe PodDisruptionBudget status updated [Conformance] - test/e2e/apps/disruption.go:141 - STEP: Waiting for the pdb to be processed 07/29/23 17:08:40.3 - STEP: Waiting for all pods to be running 07/29/23 17:08:42.364 - Jul 29 17:08:42.373: INFO: running pods: 0 < 3 - [AfterEach] [sig-apps] DisruptionController + [BeforeEach] [sig-node] Kubelet + test/e2e/common/node/kubelet.go:41 + [It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/kubelet.go:184 + Aug 24 13:20:41.865: INFO: Waiting up to 5m0s for pod "busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db" in namespace "kubelet-test-4584" to be "running and ready" + Aug 24 13:20:41.874: INFO: Pod "busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db": 
Phase="Pending", Reason="", readiness=false. Elapsed: 8.500996ms + Aug 24 13:20:41.874: INFO: The phase of Pod busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db is Pending, waiting for it to be Running (with Ready = true) + Aug 24 13:20:43.884: INFO: Pod "busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db": Phase="Running", Reason="", readiness=true. Elapsed: 2.019229791s + Aug 24 13:20:43.885: INFO: The phase of Pod busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db is Running (Ready = true) + Aug 24 13:20:43.885: INFO: Pod "busybox-readonly-fs0d57b064-b1bf-4e40-b955-86c324e2b2db" satisfied condition "running and ready" + [AfterEach] [sig-node] Kubelet test/e2e/framework/node/init/init.go:32 - Jul 29 17:08:44.390: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] DisruptionController + Aug 24 13:20:43.903: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Kubelet test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] DisruptionController + [DeferCleanup (Each)] [sig-node] Kubelet dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] DisruptionController + [DeferCleanup (Each)] [sig-node] Kubelet tear down framework | framework.go:193 - STEP: Destroying namespace "disruption-7066" for this suite. 07/29/23 17:08:44.4 + STEP: Destroying namespace "kubelet-test-4584" for this suite. 08/24/23 13:20:43.914 << End Captured GinkgoWriter Output ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should mutate custom resource with pruning [Conformance] - test/e2e/apimachinery/webhook.go:341 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +SSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:46 +[BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:08:44.412 -Jul 29 17:08:44.412: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 17:08:44.415 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:44.451 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:44.456 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 13:20:43.931 +Aug 24 13:20:43.931: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 13:20:43.933 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:43.959 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:43.964 +[BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 17:08:44.482 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 17:08:45.399 -STEP: Deploying the webhook pod 07/29/23 17:08:45.411 -STEP: Wait for the deployment to be ready 07/29/23 17:08:45.433 -Jul 29 17:08:45.444: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created -STEP: Deploying the webhook service 07/29/23 
17:08:47.466 -STEP: Verifying the service has paired with the endpoint 07/29/23 17:08:47.482 -Jul 29 17:08:48.483: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should mutate custom resource with pruning [Conformance] - test/e2e/apimachinery/webhook.go:341 -Jul 29 17:08:48.490: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Registering the mutating webhook for custom resource e2e-test-webhook-6690-crds.webhook.example.com via the AdmissionRegistration API 07/29/23 17:08:49.024 -STEP: Creating a custom resource that should be mutated by the webhook 07/29/23 17:08:49.052 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:46 +STEP: Creating projection with secret that has name projected-secret-test-c1e017cc-2fe3-4740-b03d-1b86315e8447 08/24/23 13:20:43.968 +STEP: Creating a pod to test consume secrets 08/24/23 13:20:43.976 +Aug 24 13:20:43.991: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766" in namespace "projected-5011" to be "Succeeded or Failed" +Aug 24 13:20:43.998: INFO: Pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766": Phase="Pending", Reason="", readiness=false. Elapsed: 6.163449ms +Aug 24 13:20:46.016: INFO: Pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024111761s +Aug 24 13:20:48.020: INFO: Pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028611996s +STEP: Saw pod success 08/24/23 13:20:48.022 +Aug 24 13:20:48.024: INFO: Pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766" satisfied condition "Succeeded or Failed" +Aug 24 13:20:48.034: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766 container projected-secret-volume-test: +STEP: delete the pod 08/24/23 13:20:48.049 +Aug 24 13:20:48.068: INFO: Waiting for pod pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766 to disappear +Aug 24 13:20:48.074: INFO: Pod pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766 no longer exists +[AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 -Jul 29 17:08:51.859: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 13:20:48.074: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-2806" for this suite. 07/29/23 17:08:52.035 -STEP: Destroying namespace "webhook-2806-markers" for this suite. 07/29/23 17:08:52.048 +STEP: Destroying namespace "projected-5011" for this suite. 
08/24/23 13:20:48.087 ------------------------------ -• [SLOW TEST] [7.650 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should mutate custom resource with pruning [Conformance] - test/e2e/apimachinery/webhook.go:341 +• [4.169 seconds] +[sig-storage] Projected secret +test/e2e/common/storage/framework.go:23 + should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:46 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-storage] Projected secret set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:08:44.412 - Jul 29 17:08:44.412: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 17:08:44.415 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:44.451 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:44.456 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 13:20:43.931 + Aug 24 13:20:43.931: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 13:20:43.933 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:43.959 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:43.964 + [BeforeEach] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 17:08:44.482 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 17:08:45.399 - STEP: Deploying the webhook pod 07/29/23 17:08:45.411 - STEP: Wait for the deployment to be ready 07/29/23 17:08:45.433 - Jul 29 17:08:45.444: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created - STEP: Deploying the webhook service 07/29/23 17:08:47.466 - STEP: Verifying the service has paired with the endpoint 07/29/23 17:08:47.482 - Jul 29 17:08:48.483: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should mutate custom resource with pruning [Conformance] - test/e2e/apimachinery/webhook.go:341 - Jul 29 17:08:48.490: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Registering the mutating webhook for custom resource e2e-test-webhook-6690-crds.webhook.example.com via the AdmissionRegistration API 07/29/23 17:08:49.024 - STEP: Creating a custom resource that should be mutated by the webhook 07/29/23 17:08:49.052 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [It] should be consumable from pods in volume [NodeConformance] [Conformance] + test/e2e/common/storage/projected_secret.go:46 + STEP: Creating projection with secret that has name projected-secret-test-c1e017cc-2fe3-4740-b03d-1b86315e8447 08/24/23 13:20:43.968 + STEP: Creating a pod to test consume secrets 08/24/23 13:20:43.976 + Aug 24 13:20:43.991: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766" in namespace "projected-5011" to be "Succeeded or Failed" + Aug 24 13:20:43.998: INFO: Pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766": Phase="Pending", Reason="", 
readiness=false. Elapsed: 6.163449ms + Aug 24 13:20:46.016: INFO: Pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024111761s + Aug 24 13:20:48.020: INFO: Pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028611996s + STEP: Saw pod success 08/24/23 13:20:48.022 + Aug 24 13:20:48.024: INFO: Pod "pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766" satisfied condition "Succeeded or Failed" + Aug 24 13:20:48.034: INFO: Trying to get logs from node pe9deep4seen-3 pod pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766 container projected-secret-volume-test: + STEP: delete the pod 08/24/23 13:20:48.049 + Aug 24 13:20:48.068: INFO: Waiting for pod pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766 to disappear + Aug 24 13:20:48.074: INFO: Pod pod-projected-secrets-70cf03af-f4cf-435a-a945-011e6633b766 no longer exists + [AfterEach] [sig-storage] Projected secret test/e2e/framework/node/init/init.go:32 - Jul 29 17:08:51.859: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 13:20:48.074: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected secret test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-storage] Projected secret dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-storage] Projected secret tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-2806" for this suite. 07/29/23 17:08:52.035 - STEP: Destroying namespace "webhook-2806-markers" for this suite. 07/29/23 17:08:52.048 + STEP: Destroying namespace "projected-5011" for this suite. 
08/24/23 13:20:48.087 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSS ------------------------------ -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - should honor timeout [Conformance] - test/e2e/apimachinery/webhook.go:381 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[sig-node] Security Context when creating containers with AllowPrivilegeEscalation + should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:609 +[BeforeEach] [sig-node] Security Context set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:08:52.064 -Jul 29 17:08:52.064: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename webhook 07/29/23 17:08:52.069 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:52.148 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:52.154 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +STEP: Creating a kubernetes client 08/24/23 13:20:48.102 +Aug 24 13:20:48.103: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename security-context-test 08/24/23 13:20:48.105 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:48.129 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:48.134 +[BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 -[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 -STEP: Setting up server cert 07/29/23 17:08:52.182 -STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 17:08:52.634 -STEP: Deploying the webhook pod 07/29/23 17:08:52.645 -STEP: Wait for the deployment to be ready 07/29/23 17:08:52.661 -Jul 29 17:08:52.673: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set -STEP: Deploying the webhook service 07/29/23 17:08:54.695 -STEP: Verifying the service has paired with the endpoint 07/29/23 17:08:54.71 -Jul 29 17:08:55.712: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 -[It] should honor timeout [Conformance] - test/e2e/apimachinery/webhook.go:381 -STEP: Setting timeout (1s) shorter than webhook latency (5s) 07/29/23 17:08:55.723 -STEP: Registering slow webhook via the AdmissionRegistration API 07/29/23 17:08:55.723 -STEP: Request fails when timeout (1s) is shorter than slow webhook latency (5s) 07/29/23 17:08:55.785 -STEP: Having no error when timeout is shorter than webhook latency and failure policy is ignore 07/29/23 17:08:56.808 -STEP: Registering slow webhook via the AdmissionRegistration API 07/29/23 17:08:56.809 -STEP: Having no error when timeout is longer than webhook latency 07/29/23 17:08:57.856 -STEP: Registering slow webhook via the AdmissionRegistration API 07/29/23 17:08:57.857 -STEP: Having no error when timeout is empty (defaulted to 10s in v1) 07/29/23 17:09:02.931 -STEP: Registering slow webhook via the AdmissionRegistration API 07/29/23 17:09:02.932 -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[BeforeEach] [sig-node] Security Context + test/e2e/common/node/security_context.go:50 +[It] should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] + 
test/e2e/common/node/security_context.go:609 +Aug 24 13:20:48.154: INFO: Waiting up to 5m0s for pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c" in namespace "security-context-test-2078" to be "Succeeded or Failed" +Aug 24 13:20:48.161: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c": Phase="Pending", Reason="", readiness=false. Elapsed: 7.120946ms +Aug 24 13:20:50.171: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017270883s +Aug 24 13:20:52.171: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.016444674s +Aug 24 13:20:54.169: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.01472697s +Aug 24 13:20:54.169: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c" satisfied condition "Succeeded or Failed" +[AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 -Jul 29 17:09:07.980: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +Aug 24 13:20:54.183: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +[DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 -STEP: Destroying namespace "webhook-5081" for this suite. 07/29/23 17:09:08.06 -STEP: Destroying namespace "webhook-5081-markers" for this suite. 07/29/23 17:09:08.08 +STEP: Destroying namespace "security-context-test-2078" for this suite. 
08/24/23 13:20:54.192 ------------------------------ -• [SLOW TEST] [16.030 seconds] -[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] -test/e2e/apimachinery/framework.go:23 - should honor timeout [Conformance] - test/e2e/apimachinery/webhook.go:381 +• [SLOW TEST] [6.104 seconds] +[sig-node] Security Context +test/e2e/common/node/framework.go:23 + when creating containers with AllowPrivilegeEscalation + test/e2e/common/node/security_context.go:555 + should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:609 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] Security Context set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:08:52.064 - Jul 29 17:08:52.064: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename webhook 07/29/23 17:08:52.069 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:08:52.148 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:08:52.154 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + STEP: Creating a kubernetes client 08/24/23 13:20:48.102 + Aug 24 13:20:48.103: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename security-context-test 08/24/23 13:20:48.105 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:48.129 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:48.134 + [BeforeEach] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:31 - [BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:90 - STEP: Setting up server cert 07/29/23 17:08:52.182 - STEP: Create role binding to let webhook read extension-apiserver-authentication 07/29/23 17:08:52.634 - STEP: Deploying the webhook pod 07/29/23 17:08:52.645 - STEP: Wait for the deployment to be ready 07/29/23 17:08:52.661 - Jul 29 17:08:52.673: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set - STEP: Deploying the webhook service 07/29/23 17:08:54.695 - STEP: Verifying the service has paired with the endpoint 07/29/23 17:08:54.71 - Jul 29 17:08:55.712: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 - [It] should honor timeout [Conformance] - test/e2e/apimachinery/webhook.go:381 - STEP: Setting timeout (1s) shorter than webhook latency (5s) 07/29/23 17:08:55.723 - STEP: Registering slow webhook via the AdmissionRegistration API 07/29/23 17:08:55.723 - STEP: Request fails when timeout (1s) is shorter than slow webhook latency (5s) 07/29/23 17:08:55.785 - STEP: Having no error when timeout is shorter than webhook latency and failure policy is ignore 07/29/23 17:08:56.808 - STEP: Registering slow webhook via the AdmissionRegistration API 07/29/23 17:08:56.809 - STEP: Having no error when timeout is longer than webhook latency 07/29/23 17:08:57.856 - STEP: Registering slow webhook via the AdmissionRegistration API 07/29/23 17:08:57.857 - STEP: Having no error when timeout is empty (defaulted to 10s in v1) 07/29/23 17:09:02.931 - STEP: Registering slow webhook via the AdmissionRegistration API 07/29/23 17:09:02.932 - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [BeforeEach] [sig-node] 
Security Context + test/e2e/common/node/security_context.go:50 + [It] should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] + test/e2e/common/node/security_context.go:609 + Aug 24 13:20:48.154: INFO: Waiting up to 5m0s for pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c" in namespace "security-context-test-2078" to be "Succeeded or Failed" + Aug 24 13:20:48.161: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c": Phase="Pending", Reason="", readiness=false. Elapsed: 7.120946ms + Aug 24 13:20:50.171: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017270883s + Aug 24 13:20:52.171: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.016444674s + Aug 24 13:20:54.169: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.01472697s + Aug 24 13:20:54.169: INFO: Pod "alpine-nnp-false-172bdf90-9149-4bb8-8da8-0b667366894c" satisfied condition "Succeeded or Failed" + [AfterEach] [sig-node] Security Context test/e2e/framework/node/init/init.go:32 - Jul 29 17:09:07.980: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] - test/e2e/apimachinery/webhook.go:105 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + Aug 24 13:20:54.183: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Security Context test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Security Context dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + [DeferCleanup (Each)] [sig-node] Security Context tear down framework | framework.go:193 - STEP: Destroying namespace "webhook-5081" for this suite. 07/29/23 17:09:08.06 - STEP: Destroying namespace "webhook-5081-markers" for this suite. 07/29/23 17:09:08.08 + STEP: Destroying namespace "security-context-test-2078" for this suite. 
08/24/23 13:20:54.192 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSS +SSS ------------------------------ -[sig-apps] CronJob - should support CronJob API operations [Conformance] - test/e2e/apps/cronjob.go:319 -[BeforeEach] [sig-apps] CronJob +[sig-node] Downward API + should provide host IP as an env var [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:90 +[BeforeEach] [sig-node] Downward API set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:09:08.114 -Jul 29 17:09:08.114: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename cronjob 07/29/23 17:09:08.131 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:08.19 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:08.197 -[BeforeEach] [sig-apps] CronJob +STEP: Creating a kubernetes client 08/24/23 13:20:54.207 +Aug 24 13:20:54.207: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename downward-api 08/24/23 13:20:54.21 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:54.242 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:54.252 +[BeforeEach] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:31 -[It] should support CronJob API operations [Conformance] - test/e2e/apps/cronjob.go:319 -STEP: Creating a cronjob 07/29/23 17:09:08.201 -STEP: creating 07/29/23 17:09:08.201 -STEP: getting 07/29/23 17:09:08.214 -STEP: listing 07/29/23 17:09:08.221 -STEP: watching 07/29/23 17:09:08.23 -Jul 29 17:09:08.230: INFO: starting watch -STEP: cluster-wide listing 07/29/23 17:09:08.235 -STEP: cluster-wide watching 07/29/23 17:09:08.242 -Jul 29 17:09:08.242: INFO: starting watch -STEP: patching 07/29/23 17:09:08.246 -STEP: updating 07/29/23 17:09:08.259 -Jul 29 17:09:08.275: INFO: waiting for watch events with expected annotations -Jul 29 17:09:08.275: INFO: saw patched and updated annotations -STEP: patching /status 07/29/23 17:09:08.275 -STEP: updating /status 07/29/23 17:09:08.301 -STEP: get /status 07/29/23 17:09:08.318 -STEP: deleting 07/29/23 17:09:08.329 -STEP: deleting a collection 07/29/23 17:09:08.364 -[AfterEach] [sig-apps] CronJob +[It] should provide host IP as an env var [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:90 +STEP: Creating a pod to test downward api env vars 08/24/23 13:20:54.264 +Aug 24 13:20:54.281: INFO: Waiting up to 5m0s for pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df" in namespace "downward-api-5088" to be "Succeeded or Failed" +Aug 24 13:20:54.287: INFO: Pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df": Phase="Pending", Reason="", readiness=false. Elapsed: 5.983488ms +Aug 24 13:20:56.294: INFO: Pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013005182s +Aug 24 13:20:58.294: INFO: Pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.012511719s +STEP: Saw pod success 08/24/23 13:20:58.294 +Aug 24 13:20:58.294: INFO: Pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df" satisfied condition "Succeeded or Failed" +Aug 24 13:20:58.300: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-8d121c71-8b32-4422-b3ca-39df272c86df container dapi-container: +STEP: delete the pod 08/24/23 13:20:58.317 +Aug 24 13:20:58.337: INFO: Waiting for pod downward-api-8d121c71-8b32-4422-b3ca-39df272c86df to disappear +Aug 24 13:20:58.342: INFO: Pod downward-api-8d121c71-8b32-4422-b3ca-39df272c86df no longer exists +[AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 -Jul 29 17:09:08.394: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-apps] CronJob +Aug 24 13:20:58.342: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-apps] CronJob +[DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-apps] CronJob +[DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 -STEP: Destroying namespace "cronjob-4722" for this suite. 07/29/23 17:09:08.41 +STEP: Destroying namespace "downward-api-5088" for this suite. 08/24/23 13:20:58.351 ------------------------------ -• [0.311 seconds] -[sig-apps] CronJob -test/e2e/apps/framework.go:23 - should support CronJob API operations [Conformance] - test/e2e/apps/cronjob.go:319 +• [4.157 seconds] +[sig-node] Downward API +test/e2e/common/node/framework.go:23 + should provide host IP as an env var [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:90 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-apps] CronJob + [BeforeEach] [sig-node] Downward API set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:09:08.114 - Jul 29 17:09:08.114: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename cronjob 07/29/23 17:09:08.131 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:08.19 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:08.197 - [BeforeEach] [sig-apps] CronJob + STEP: Creating a kubernetes client 08/24/23 13:20:54.207 + Aug 24 13:20:54.207: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename downward-api 08/24/23 13:20:54.21 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:54.242 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:54.252 + [BeforeEach] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:31 - [It] should support CronJob API operations [Conformance] - test/e2e/apps/cronjob.go:319 - STEP: Creating a cronjob 07/29/23 17:09:08.201 - STEP: creating 07/29/23 17:09:08.201 - STEP: getting 07/29/23 17:09:08.214 - STEP: listing 07/29/23 17:09:08.221 - STEP: watching 07/29/23 17:09:08.23 - Jul 29 17:09:08.230: INFO: starting watch - STEP: cluster-wide listing 07/29/23 17:09:08.235 - STEP: cluster-wide watching 07/29/23 17:09:08.242 - Jul 29 17:09:08.242: INFO: starting watch - STEP: patching 07/29/23 17:09:08.246 - STEP: updating 07/29/23 17:09:08.259 - Jul 29 17:09:08.275: INFO: waiting for watch events with expected annotations - Jul 29 17:09:08.275: INFO: saw patched and updated annotations - STEP: patching 
/status 07/29/23 17:09:08.275 - STEP: updating /status 07/29/23 17:09:08.301 - STEP: get /status 07/29/23 17:09:08.318 - STEP: deleting 07/29/23 17:09:08.329 - STEP: deleting a collection 07/29/23 17:09:08.364 - [AfterEach] [sig-apps] CronJob + [It] should provide host IP as an env var [NodeConformance] [Conformance] + test/e2e/common/node/downwardapi.go:90 + STEP: Creating a pod to test downward api env vars 08/24/23 13:20:54.264 + Aug 24 13:20:54.281: INFO: Waiting up to 5m0s for pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df" in namespace "downward-api-5088" to be "Succeeded or Failed" + Aug 24 13:20:54.287: INFO: Pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df": Phase="Pending", Reason="", readiness=false. Elapsed: 5.983488ms + Aug 24 13:20:56.294: INFO: Pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013005182s + Aug 24 13:20:58.294: INFO: Pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.012511719s + STEP: Saw pod success 08/24/23 13:20:58.294 + Aug 24 13:20:58.294: INFO: Pod "downward-api-8d121c71-8b32-4422-b3ca-39df272c86df" satisfied condition "Succeeded or Failed" + Aug 24 13:20:58.300: INFO: Trying to get logs from node pe9deep4seen-3 pod downward-api-8d121c71-8b32-4422-b3ca-39df272c86df container dapi-container: + STEP: delete the pod 08/24/23 13:20:58.317 + Aug 24 13:20:58.337: INFO: Waiting for pod downward-api-8d121c71-8b32-4422-b3ca-39df272c86df to disappear + Aug 24 13:20:58.342: INFO: Pod downward-api-8d121c71-8b32-4422-b3ca-39df272c86df no longer exists + [AfterEach] [sig-node] Downward API test/e2e/framework/node/init/init.go:32 - Jul 29 17:09:08.394: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-apps] CronJob + Aug 24 13:20:58.342: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-node] Downward API test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-apps] CronJob + [DeferCleanup (Each)] [sig-node] Downward API dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-apps] CronJob + [DeferCleanup (Each)] [sig-node] Downward API tear down framework | framework.go:193 - STEP: Destroying namespace "cronjob-4722" for this suite. 07/29/23 17:09:08.41 + STEP: Destroying namespace "downward-api-5088" for this suite. 
08/24/23 13:20:58.351 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Security Context - should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] - test/e2e/node/security_context.go:164 -[BeforeEach] [sig-node] Security Context +[sig-storage] Projected combined + should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + test/e2e/common/storage/projected_combined.go:44 +[BeforeEach] [sig-storage] Projected combined set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:09:08.439 -Jul 29 17:09:08.442: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename security-context 07/29/23 17:09:08.445 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:08.473 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:08.481 -[BeforeEach] [sig-node] Security Context +STEP: Creating a kubernetes client 08/24/23 13:20:58.372 +Aug 24 13:20:58.372: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 13:20:58.373 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:58.404 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:58.41 +[BeforeEach] [sig-storage] Projected combined test/e2e/framework/metrics/init/init.go:31 -[It] should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] - test/e2e/node/security_context.go:164 -STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser 07/29/23 17:09:08.486 -Jul 29 17:09:08.501: INFO: Waiting up to 5m0s for pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c" in namespace "security-context-291" to be "Succeeded or Failed" -Jul 29 17:09:08.508: INFO: Pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c": Phase="Pending", Reason="", readiness=false. Elapsed: 7.324777ms -Jul 29 17:09:10.518: INFO: Pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016581907s -Jul 29 17:09:12.528: INFO: Pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.027316699s -STEP: Saw pod success 07/29/23 17:09:12.528 -Jul 29 17:09:12.529: INFO: Pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c" satisfied condition "Succeeded or Failed" -Jul 29 17:09:12.536: INFO: Trying to get logs from node wetuj3nuajog-3 pod security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c container test-container: -STEP: delete the pod 07/29/23 17:09:12.548 -Jul 29 17:09:12.568: INFO: Waiting for pod security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c to disappear -Jul 29 17:09:12.575: INFO: Pod security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c no longer exists -[AfterEach] [sig-node] Security Context +[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + test/e2e/common/storage/projected_combined.go:44 +STEP: Creating configMap with name configmap-projected-all-test-volume-08d03ea5-0a9e-4203-9671-3c92224ff37d 08/24/23 13:20:58.416 +STEP: Creating secret with name secret-projected-all-test-volume-99946c94-9d2f-4c90-b1e0-9584a4190f29 08/24/23 13:20:58.425 +STEP: Creating a pod to test Check all projections for projected volume plugin 08/24/23 13:20:58.431 +Aug 24 13:20:58.445: INFO: Waiting up to 5m0s for pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd" in namespace "projected-958" to be "Succeeded or Failed" +Aug 24 13:20:58.456: INFO: Pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd": Phase="Pending", Reason="", readiness=false. Elapsed: 10.651599ms +Aug 24 13:21:00.463: INFO: Pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018404821s +Aug 24 13:21:02.465: INFO: Pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020335641s +STEP: Saw pod success 08/24/23 13:21:02.465 +Aug 24 13:21:02.466: INFO: Pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd" satisfied condition "Succeeded or Failed" +Aug 24 13:21:02.473: INFO: Trying to get logs from node pe9deep4seen-3 pod projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd container projected-all-volume-test: +STEP: delete the pod 08/24/23 13:21:02.485 +Aug 24 13:21:02.505: INFO: Waiting for pod projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd to disappear +Aug 24 13:21:02.512: INFO: Pod projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd no longer exists +[AfterEach] [sig-storage] Projected combined test/e2e/framework/node/init/init.go:32 -Jul 29 17:09:12.575: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Security Context +Aug 24 13:21:02.512: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected combined test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Security Context +[DeferCleanup (Each)] [sig-storage] Projected combined dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Security Context +[DeferCleanup (Each)] [sig-storage] Projected combined tear down framework | framework.go:193 -STEP: Destroying namespace "security-context-291" for this suite. 07/29/23 17:09:12.581 +STEP: Destroying namespace "projected-958" for this suite. 
08/24/23 13:21:02.52 ------------------------------ -• [4.199 seconds] -[sig-node] Security Context -test/e2e/node/framework.go:23 - should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] - test/e2e/node/security_context.go:164 +• [4.164 seconds] +[sig-storage] Projected combined +test/e2e/common/storage/framework.go:23 + should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + test/e2e/common/storage/projected_combined.go:44 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Security Context + [BeforeEach] [sig-storage] Projected combined set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:09:08.439 - Jul 29 17:09:08.442: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename security-context 07/29/23 17:09:08.445 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:08.473 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:08.481 - [BeforeEach] [sig-node] Security Context + STEP: Creating a kubernetes client 08/24/23 13:20:58.372 + Aug 24 13:20:58.372: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 13:20:58.373 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:20:58.404 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:20:58.41 + [BeforeEach] [sig-storage] Projected combined test/e2e/framework/metrics/init/init.go:31 - [It] should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] - test/e2e/node/security_context.go:164 - STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser 07/29/23 17:09:08.486 - Jul 29 17:09:08.501: INFO: Waiting up to 5m0s for pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c" in namespace "security-context-291" to be "Succeeded or Failed" - Jul 29 17:09:08.508: INFO: Pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c": Phase="Pending", Reason="", readiness=false. Elapsed: 7.324777ms - Jul 29 17:09:10.518: INFO: Pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016581907s - Jul 29 17:09:12.528: INFO: Pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.027316699s - STEP: Saw pod success 07/29/23 17:09:12.528 - Jul 29 17:09:12.529: INFO: Pod "security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c" satisfied condition "Succeeded or Failed" - Jul 29 17:09:12.536: INFO: Trying to get logs from node wetuj3nuajog-3 pod security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c container test-container: - STEP: delete the pod 07/29/23 17:09:12.548 - Jul 29 17:09:12.568: INFO: Waiting for pod security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c to disappear - Jul 29 17:09:12.575: INFO: Pod security-context-cb9d6ffd-71ec-4a56-8da9-a1b2ff27690c no longer exists - [AfterEach] [sig-node] Security Context + [It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + test/e2e/common/storage/projected_combined.go:44 + STEP: Creating configMap with name configmap-projected-all-test-volume-08d03ea5-0a9e-4203-9671-3c92224ff37d 08/24/23 13:20:58.416 + STEP: Creating secret with name secret-projected-all-test-volume-99946c94-9d2f-4c90-b1e0-9584a4190f29 08/24/23 13:20:58.425 + STEP: Creating a pod to test Check all projections for projected volume plugin 08/24/23 13:20:58.431 + Aug 24 13:20:58.445: INFO: Waiting up to 5m0s for pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd" in namespace "projected-958" to be "Succeeded or Failed" + Aug 24 13:20:58.456: INFO: Pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd": Phase="Pending", Reason="", readiness=false. Elapsed: 10.651599ms + Aug 24 13:21:00.463: INFO: Pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018404821s + Aug 24 13:21:02.465: INFO: Pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020335641s + STEP: Saw pod success 08/24/23 13:21:02.465 + Aug 24 13:21:02.466: INFO: Pod "projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd" satisfied condition "Succeeded or Failed" + Aug 24 13:21:02.473: INFO: Trying to get logs from node pe9deep4seen-3 pod projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd container projected-all-volume-test: + STEP: delete the pod 08/24/23 13:21:02.485 + Aug 24 13:21:02.505: INFO: Waiting for pod projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd to disappear + Aug 24 13:21:02.512: INFO: Pod projected-volume-b27781ee-3226-45a0-9bb5-abd977a4eddd no longer exists + [AfterEach] [sig-storage] Projected combined test/e2e/framework/node/init/init.go:32 - Jul 29 17:09:12.575: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Security Context + Aug 24 13:21:02.512: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-storage] Projected combined test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Security Context + [DeferCleanup (Each)] [sig-storage] Projected combined dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Security Context + [DeferCleanup (Each)] [sig-storage] Projected combined tear down framework | framework.go:193 - STEP: Destroying namespace "security-context-291" for this suite. 07/29/23 17:09:12.581 + STEP: Destroying namespace "projected-958" for this suite. 
08/24/23 13:21:02.52 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSS +SSSSSSSSSSSSS ------------------------------ -[sig-storage] Projected secret - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:78 -[BeforeEach] [sig-storage] Projected secret +[sig-auth] ServiceAccounts + ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] + test/e2e/auth/service_accounts.go:531 +[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:09:12.641 -Jul 29 17:09:12.642: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename projected 07/29/23 17:09:12.644 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:12.705 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:12.71 -[BeforeEach] [sig-storage] Projected secret +STEP: Creating a kubernetes client 08/24/23 13:21:02.537 +Aug 24 13:21:02.537: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename svcaccounts 08/24/23 13:21:02.539 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:02.565 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:02.571 +[BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 -[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:78 -STEP: Creating projection with secret that has name projected-secret-test-map-5a91117d-c812-40fa-9a9c-1a7d91120a02 07/29/23 17:09:12.714 -STEP: Creating a pod to test consume secrets 07/29/23 17:09:12.725 -Jul 29 17:09:12.742: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c" in namespace "projected-3728" to be "Succeeded or Failed" -Jul 29 17:09:12.750: INFO: Pod "pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c": Phase="Pending", Reason="", readiness=false. Elapsed: 8.154789ms -Jul 29 17:09:14.759: INFO: Pod "pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017002533s -Jul 29 17:09:16.761: INFO: Pod "pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.018417642s -STEP: Saw pod success 07/29/23 17:09:16.761 -Jul 29 17:09:16.761: INFO: Pod "pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c" satisfied condition "Succeeded or Failed" -Jul 29 17:09:16.766: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c container projected-secret-volume-test: -STEP: delete the pod 07/29/23 17:09:16.779 -Jul 29 17:09:16.802: INFO: Waiting for pod pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c to disappear -Jul 29 17:09:16.807: INFO: Pod pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c no longer exists -[AfterEach] [sig-storage] Projected secret +[It] ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] + test/e2e/auth/service_accounts.go:531 +Aug 24 13:21:02.600: INFO: created pod +Aug 24 13:21:02.600: INFO: Waiting up to 5m0s for pod "oidc-discovery-validator" in namespace "svcaccounts-6889" to be "Succeeded or Failed" +Aug 24 13:21:02.607: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 6.701185ms +Aug 24 13:21:04.620: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019793588s +Aug 24 13:21:06.617: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 4.017045463s +Aug 24 13:21:08.617: INFO: Pod "oidc-discovery-validator": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.016739893s +STEP: Saw pod success 08/24/23 13:21:08.617 +Aug 24 13:21:08.617: INFO: Pod "oidc-discovery-validator" satisfied condition "Succeeded or Failed" +Aug 24 13:21:38.618: INFO: polling logs +Aug 24 13:21:38.634: INFO: Pod logs: +I0824 13:21:03.720044 1 log.go:198] OK: Got token +I0824 13:21:03.720310 1 log.go:198] validating with in-cluster discovery +I0824 13:21:03.722207 1 log.go:198] OK: got issuer https://kubernetes.default.svc.cluster.local +I0824 13:21:03.722279 1 log.go:198] Full, not-validated claims: +openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-6889:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1692883862, NotBefore:1692883262, IssuedAt:1692883262, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-6889", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"ef028ed8-2f14-4257-b226-5c83301a9727"}}} +I0824 13:21:03.755495 1 log.go:198] OK: Constructed OIDC provider for issuer https://kubernetes.default.svc.cluster.local +I0824 13:21:03.765996 1 log.go:198] OK: Validated signature on JWT +I0824 13:21:03.766193 1 log.go:198] OK: Got valid claims from token! 
+I0824 13:21:03.766240 1 log.go:198] Full, validated claims: +&openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-6889:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1692883862, NotBefore:1692883262, IssuedAt:1692883262, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-6889", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"ef028ed8-2f14-4257-b226-5c83301a9727"}}} + +Aug 24 13:21:38.634: INFO: completed pod +[AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 -Jul 29 17:09:16.807: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-storage] Projected secret +Aug 24 13:21:38.646: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-storage] Projected secret +[DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 -STEP: Destroying namespace "projected-3728" for this suite. 07/29/23 17:09:16.815 +STEP: Destroying namespace "svcaccounts-6889" for this suite. 08/24/23 13:21:38.655 ------------------------------ -• [4.185 seconds] -[sig-storage] Projected secret -test/e2e/common/storage/framework.go:23 - should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:78 +• [SLOW TEST] [36.130 seconds] +[sig-auth] ServiceAccounts +test/e2e/auth/framework.go:23 + ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] + test/e2e/auth/service_accounts.go:531 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-storage] Projected secret + [BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:09:12.641 - Jul 29 17:09:12.642: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename projected 07/29/23 17:09:12.644 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:12.705 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:12.71 - [BeforeEach] [sig-storage] Projected secret + STEP: Creating a kubernetes client 08/24/23 13:21:02.537 + Aug 24 13:21:02.537: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename svcaccounts 08/24/23 13:21:02.539 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:02.565 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:02.571 + [BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 - [It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] - test/e2e/common/storage/projected_secret.go:78 - STEP: Creating projection with secret that has name projected-secret-test-map-5a91117d-c812-40fa-9a9c-1a7d91120a02 07/29/23 17:09:12.714 - STEP: Creating a pod to test consume secrets 07/29/23 17:09:12.725 - Jul 29 17:09:12.742: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c" in namespace "projected-3728" to be "Succeeded or Failed" - Jul 29 17:09:12.750: INFO: Pod 
"pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c": Phase="Pending", Reason="", readiness=false. Elapsed: 8.154789ms - Jul 29 17:09:14.759: INFO: Pod "pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017002533s - Jul 29 17:09:16.761: INFO: Pod "pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018417642s - STEP: Saw pod success 07/29/23 17:09:16.761 - Jul 29 17:09:16.761: INFO: Pod "pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c" satisfied condition "Succeeded or Failed" - Jul 29 17:09:16.766: INFO: Trying to get logs from node wetuj3nuajog-3 pod pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c container projected-secret-volume-test: - STEP: delete the pod 07/29/23 17:09:16.779 - Jul 29 17:09:16.802: INFO: Waiting for pod pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c to disappear - Jul 29 17:09:16.807: INFO: Pod pod-projected-secrets-1081f8b6-aeb1-47a6-b109-f3c2f0a2440c no longer exists - [AfterEach] [sig-storage] Projected secret + [It] ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] + test/e2e/auth/service_accounts.go:531 + Aug 24 13:21:02.600: INFO: created pod + Aug 24 13:21:02.600: INFO: Waiting up to 5m0s for pod "oidc-discovery-validator" in namespace "svcaccounts-6889" to be "Succeeded or Failed" + Aug 24 13:21:02.607: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 6.701185ms + Aug 24 13:21:04.620: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019793588s + Aug 24 13:21:06.617: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 4.017045463s + Aug 24 13:21:08.617: INFO: Pod "oidc-discovery-validator": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.016739893s + STEP: Saw pod success 08/24/23 13:21:08.617 + Aug 24 13:21:08.617: INFO: Pod "oidc-discovery-validator" satisfied condition "Succeeded or Failed" + Aug 24 13:21:38.618: INFO: polling logs + Aug 24 13:21:38.634: INFO: Pod logs: + I0824 13:21:03.720044 1 log.go:198] OK: Got token + I0824 13:21:03.720310 1 log.go:198] validating with in-cluster discovery + I0824 13:21:03.722207 1 log.go:198] OK: got issuer https://kubernetes.default.svc.cluster.local + I0824 13:21:03.722279 1 log.go:198] Full, not-validated claims: + openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-6889:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1692883862, NotBefore:1692883262, IssuedAt:1692883262, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-6889", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"ef028ed8-2f14-4257-b226-5c83301a9727"}}} + I0824 13:21:03.755495 1 log.go:198] OK: Constructed OIDC provider for issuer https://kubernetes.default.svc.cluster.local + I0824 13:21:03.765996 1 log.go:198] OK: Validated signature on JWT + I0824 13:21:03.766193 1 log.go:198] OK: Got valid claims from token! 
+ I0824 13:21:03.766240 1 log.go:198] Full, validated claims: + &openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-6889:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1692883862, NotBefore:1692883262, IssuedAt:1692883262, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-6889", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"ef028ed8-2f14-4257-b226-5c83301a9727"}}} + + Aug 24 13:21:38.634: INFO: completed pod + [AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 - Jul 29 17:09:16.807: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-storage] Projected secret + Aug 24 13:21:38.646: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-storage] Projected secret + [DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-storage] Projected secret + [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 - STEP: Destroying namespace "projected-3728" for this suite. 07/29/23 17:09:16.815 + STEP: Destroying namespace "svcaccounts-6889" for this suite. 08/24/23 13:21:38.655 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSS ------------------------------ -[sig-node] Downward API - should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:44 -[BeforeEach] [sig-node] Downward API +[sig-auth] ServiceAccounts + should allow opting out of API token automount [Conformance] + test/e2e/auth/service_accounts.go:161 +[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:09:16.852 -Jul 29 17:09:16.852: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 17:09:16.854 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:16.891 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:16.895 -[BeforeEach] [sig-node] Downward API +STEP: Creating a kubernetes client 08/24/23 13:21:38.671 +Aug 24 13:21:38.671: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename svcaccounts 08/24/23 13:21:38.673 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:38.699 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:38.706 +[BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 -[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:44 -STEP: Creating a pod to test downward api env vars 07/29/23 17:09:16.899 -Jul 29 17:09:16.930: INFO: Waiting up to 5m0s for pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f" in namespace "downward-api-9180" to be "Succeeded or Failed" -Jul 29 17:09:16.942: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f": Phase="Pending", Reason="", readiness=false. Elapsed: 12.550609ms -Jul 29 17:09:18.952: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.022493492s -Jul 29 17:09:20.954: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f": Phase="Pending", Reason="", readiness=false. Elapsed: 4.024039855s -Jul 29 17:09:22.954: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.024677843s -STEP: Saw pod success 07/29/23 17:09:22.954 -Jul 29 17:09:22.955: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f" satisfied condition "Succeeded or Failed" -Jul 29 17:09:22.963: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f container dapi-container: -STEP: delete the pod 07/29/23 17:09:22.981 -Jul 29 17:09:23.003: INFO: Waiting for pod downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f to disappear -Jul 29 17:09:23.009: INFO: Pod downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f no longer exists -[AfterEach] [sig-node] Downward API +[It] should allow opting out of API token automount [Conformance] + test/e2e/auth/service_accounts.go:161 +Aug 24 13:21:38.735: INFO: created pod pod-service-account-defaultsa +Aug 24 13:21:38.736: INFO: pod pod-service-account-defaultsa service account token volume mount: true +Aug 24 13:21:38.749: INFO: created pod pod-service-account-mountsa +Aug 24 13:21:38.749: INFO: pod pod-service-account-mountsa service account token volume mount: true +Aug 24 13:21:38.766: INFO: created pod pod-service-account-nomountsa +Aug 24 13:21:38.766: INFO: pod pod-service-account-nomountsa service account token volume mount: false +Aug 24 13:21:38.779: INFO: created pod pod-service-account-defaultsa-mountspec +Aug 24 13:21:38.780: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true +Aug 24 13:21:38.791: INFO: created pod pod-service-account-mountsa-mountspec +Aug 24 13:21:38.792: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true +Aug 24 13:21:38.802: INFO: created pod pod-service-account-nomountsa-mountspec +Aug 24 13:21:38.802: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true +Aug 24 13:21:38.832: INFO: created pod pod-service-account-defaultsa-nomountspec +Aug 24 13:21:38.832: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false +Aug 24 13:21:38.850: INFO: created pod pod-service-account-mountsa-nomountspec +Aug 24 13:21:38.850: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false +Aug 24 13:21:38.914: INFO: created pod pod-service-account-nomountsa-nomountspec +Aug 24 13:21:38.914: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false +[AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 -Jul 29 17:09:23.010: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Downward API +Aug 24 13:21:38.914: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-9180" for this suite. 07/29/23 17:09:23.018 +STEP: Destroying namespace "svcaccounts-8786" for this suite. 
08/24/23 13:21:38.992 ------------------------------ -• [SLOW TEST] [6.181 seconds] -[sig-node] Downward API -test/e2e/common/node/framework.go:23 - should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:44 +• [0.369 seconds] +[sig-auth] ServiceAccounts +test/e2e/auth/framework.go:23 + should allow opting out of API token automount [Conformance] + test/e2e/auth/service_accounts.go:161 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Downward API + [BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:09:16.852 - Jul 29 17:09:16.852: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 17:09:16.854 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:16.891 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:16.895 - [BeforeEach] [sig-node] Downward API + STEP: Creating a kubernetes client 08/24/23 13:21:38.671 + Aug 24 13:21:38.671: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename svcaccounts 08/24/23 13:21:38.673 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:38.699 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:38.706 + [BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 - [It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:44 - STEP: Creating a pod to test downward api env vars 07/29/23 17:09:16.899 - Jul 29 17:09:16.930: INFO: Waiting up to 5m0s for pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f" in namespace "downward-api-9180" to be "Succeeded or Failed" - Jul 29 17:09:16.942: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f": Phase="Pending", Reason="", readiness=false. Elapsed: 12.550609ms - Jul 29 17:09:18.952: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022493492s - Jul 29 17:09:20.954: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f": Phase="Pending", Reason="", readiness=false. Elapsed: 4.024039855s - Jul 29 17:09:22.954: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 6.024677843s - STEP: Saw pod success 07/29/23 17:09:22.954 - Jul 29 17:09:22.955: INFO: Pod "downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f" satisfied condition "Succeeded or Failed" - Jul 29 17:09:22.963: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f container dapi-container: - STEP: delete the pod 07/29/23 17:09:22.981 - Jul 29 17:09:23.003: INFO: Waiting for pod downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f to disappear - Jul 29 17:09:23.009: INFO: Pod downward-api-b1c56899-9f78-457c-86c4-fa50ccf53c1f no longer exists - [AfterEach] [sig-node] Downward API + [It] should allow opting out of API token automount [Conformance] + test/e2e/auth/service_accounts.go:161 + Aug 24 13:21:38.735: INFO: created pod pod-service-account-defaultsa + Aug 24 13:21:38.736: INFO: pod pod-service-account-defaultsa service account token volume mount: true + Aug 24 13:21:38.749: INFO: created pod pod-service-account-mountsa + Aug 24 13:21:38.749: INFO: pod pod-service-account-mountsa service account token volume mount: true + Aug 24 13:21:38.766: INFO: created pod pod-service-account-nomountsa + Aug 24 13:21:38.766: INFO: pod pod-service-account-nomountsa service account token volume mount: false + Aug 24 13:21:38.779: INFO: created pod pod-service-account-defaultsa-mountspec + Aug 24 13:21:38.780: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true + Aug 24 13:21:38.791: INFO: created pod pod-service-account-mountsa-mountspec + Aug 24 13:21:38.792: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true + Aug 24 13:21:38.802: INFO: created pod pod-service-account-nomountsa-mountspec + Aug 24 13:21:38.802: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true + Aug 24 13:21:38.832: INFO: created pod pod-service-account-defaultsa-nomountspec + Aug 24 13:21:38.832: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false + Aug 24 13:21:38.850: INFO: created pod pod-service-account-mountsa-nomountspec + Aug 24 13:21:38.850: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false + Aug 24 13:21:38.914: INFO: created pod pod-service-account-nomountsa-nomountspec + Aug 24 13:21:38.914: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false + [AfterEach] [sig-auth] ServiceAccounts test/e2e/framework/node/init/init.go:32 - Jul 29 17:09:23.010: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Downward API + Aug 24 13:21:38.914: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-auth] ServiceAccounts dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-9180" for this suite. 07/29/23 17:09:23.018 + STEP: Destroying namespace "svcaccounts-8786" for this suite. 
08/24/23 13:21:38.992 << End Captured GinkgoWriter Output ------------------------------ -SS +SSSSSSSSSSSSSSSSSS ------------------------------ -[sig-node] Downward API - should provide pod UID as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:267 -[BeforeEach] [sig-node] Downward API +[sig-instrumentation] Events API + should delete a collection of events [Conformance] + test/e2e/instrumentation/events.go:207 +[BeforeEach] [sig-instrumentation] Events API set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:09:23.039 -Jul 29 17:09:23.039: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename downward-api 07/29/23 17:09:23.041 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:23.073 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:23.078 -[BeforeEach] [sig-node] Downward API +STEP: Creating a kubernetes client 08/24/23 13:21:39.042 +Aug 24 13:21:39.042: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename events 08/24/23 13:21:39.051 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:39.166 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:39.182 +[BeforeEach] [sig-instrumentation] Events API test/e2e/framework/metrics/init/init.go:31 -[It] should provide pod UID as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:267 -STEP: Creating a pod to test downward api env vars 07/29/23 17:09:23.084 -Jul 29 17:09:23.101: INFO: Waiting up to 5m0s for pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c" in namespace "downward-api-8329" to be "Succeeded or Failed" -Jul 29 17:09:23.106: INFO: Pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.73727ms -Jul 29 17:09:25.114: INFO: Pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012053295s -Jul 29 17:09:27.117: INFO: Pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01573583s -STEP: Saw pod success 07/29/23 17:09:27.118 -Jul 29 17:09:27.118: INFO: Pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c" satisfied condition "Succeeded or Failed" -Jul 29 17:09:27.124: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c container dapi-container: -STEP: delete the pod 07/29/23 17:09:27.137 -Jul 29 17:09:27.157: INFO: Waiting for pod downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c to disappear -Jul 29 17:09:27.162: INFO: Pod downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c no longer exists -[AfterEach] [sig-node] Downward API +[BeforeEach] [sig-instrumentation] Events API + test/e2e/instrumentation/events.go:84 +[It] should delete a collection of events [Conformance] + test/e2e/instrumentation/events.go:207 +STEP: Create set of events 08/24/23 13:21:39.223 +STEP: get a list of Events with a label in the current namespace 08/24/23 13:21:39.606 +STEP: delete a list of events 08/24/23 13:21:39.814 +Aug 24 13:21:39.815: INFO: requesting DeleteCollection of events +STEP: check that the list of events matches the requested quantity 08/24/23 13:21:39.957 +[AfterEach] [sig-instrumentation] Events API test/e2e/framework/node/init/init.go:32 -Jul 29 17:09:27.163: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] [sig-node] Downward API +Aug 24 13:21:40.018: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-instrumentation] Events API test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-instrumentation] Events API dump namespaces | framework.go:196 -[DeferCleanup (Each)] [sig-node] Downward API +[DeferCleanup (Each)] [sig-instrumentation] Events API tear down framework | framework.go:193 -STEP: Destroying namespace "downward-api-8329" for this suite. 07/29/23 17:09:27.173 +STEP: Destroying namespace "events-4554" for this suite. 
08/24/23 13:21:40.115 ------------------------------ -• [4.147 seconds] -[sig-node] Downward API -test/e2e/common/node/framework.go:23 - should provide pod UID as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:267 +• [1.091 seconds] +[sig-instrumentation] Events API +test/e2e/instrumentation/common/framework.go:23 + should delete a collection of events [Conformance] + test/e2e/instrumentation/events.go:207 Begin Captured GinkgoWriter Output >> - [BeforeEach] [sig-node] Downward API + [BeforeEach] [sig-instrumentation] Events API set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:09:23.039 - Jul 29 17:09:23.039: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename downward-api 07/29/23 17:09:23.041 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:23.073 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:23.078 - [BeforeEach] [sig-node] Downward API + STEP: Creating a kubernetes client 08/24/23 13:21:39.042 + Aug 24 13:21:39.042: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename events 08/24/23 13:21:39.051 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:39.166 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:39.182 + [BeforeEach] [sig-instrumentation] Events API test/e2e/framework/metrics/init/init.go:31 - [It] should provide pod UID as env vars [NodeConformance] [Conformance] - test/e2e/common/node/downwardapi.go:267 - STEP: Creating a pod to test downward api env vars 07/29/23 17:09:23.084 - Jul 29 17:09:23.101: INFO: Waiting up to 5m0s for pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c" in namespace "downward-api-8329" to be "Succeeded or Failed" - Jul 29 17:09:23.106: INFO: Pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.73727ms - Jul 29 17:09:25.114: INFO: Pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012053295s - Jul 29 17:09:27.117: INFO: Pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01573583s - STEP: Saw pod success 07/29/23 17:09:27.118 - Jul 29 17:09:27.118: INFO: Pod "downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c" satisfied condition "Succeeded or Failed" - Jul 29 17:09:27.124: INFO: Trying to get logs from node wetuj3nuajog-3 pod downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c container dapi-container: - STEP: delete the pod 07/29/23 17:09:27.137 - Jul 29 17:09:27.157: INFO: Waiting for pod downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c to disappear - Jul 29 17:09:27.162: INFO: Pod downward-api-663b0b5c-3859-4e19-b48d-c55e8895bd9c no longer exists - [AfterEach] [sig-node] Downward API + [BeforeEach] [sig-instrumentation] Events API + test/e2e/instrumentation/events.go:84 + [It] should delete a collection of events [Conformance] + test/e2e/instrumentation/events.go:207 + STEP: Create set of events 08/24/23 13:21:39.223 + STEP: get a list of Events with a label in the current namespace 08/24/23 13:21:39.606 + STEP: delete a list of events 08/24/23 13:21:39.814 + Aug 24 13:21:39.815: INFO: requesting DeleteCollection of events + STEP: check that the list of events matches the requested quantity 08/24/23 13:21:39.957 + [AfterEach] [sig-instrumentation] Events API test/e2e/framework/node/init/init.go:32 - Jul 29 17:09:27.163: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready - [DeferCleanup (Each)] [sig-node] Downward API + Aug 24 13:21:40.018: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready + [DeferCleanup (Each)] [sig-instrumentation] Events API test/e2e/framework/metrics/init/init.go:33 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-instrumentation] Events API dump namespaces | framework.go:196 - [DeferCleanup (Each)] [sig-node] Downward API + [DeferCleanup (Each)] [sig-instrumentation] Events API tear down framework | framework.go:193 - STEP: Destroying namespace "downward-api-8329" for this suite. 07/29/23 17:09:27.173 + STEP: Destroying namespace "events-4554" for this suite. 
08/24/23 13:21:40.115 << End Captured GinkgoWriter Output ------------------------------ -SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS ------------------------------ -[sig-network] Proxy version v1 - A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] - test/e2e/network/proxy.go:286 -[BeforeEach] version v1 +[sig-storage] Projected downwardAPI + should provide podname only [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:53 +[BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 -STEP: Creating a kubernetes client 07/29/23 17:09:27.197 -Jul 29 17:09:27.198: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 -STEP: Building a namespace api object, basename proxy 07/29/23 17:09:27.199 -STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:27.229 -STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:27.234 -[BeforeEach] version v1 +STEP: Creating a kubernetes client 08/24/23 13:21:40.251 +Aug 24 13:21:40.251: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 +STEP: Building a namespace api object, basename projected 08/24/23 13:21:40.255 +STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:40.286 +STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:40.3 +[BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 -[It] A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] - test/e2e/network/proxy.go:286 -Jul 29 17:09:27.238: INFO: Creating pod... -Jul 29 17:09:27.256: INFO: Waiting up to 5m0s for pod "agnhost" in namespace "proxy-4287" to be "running" -Jul 29 17:09:27.261: INFO: Pod "agnhost": Phase="Pending", Reason="", readiness=false. Elapsed: 5.003526ms -Jul 29 17:09:29.268: INFO: Pod "agnhost": Phase="Running", Reason="", readiness=true. Elapsed: 2.011842727s -Jul 29 17:09:29.268: INFO: Pod "agnhost" satisfied condition "running" -Jul 29 17:09:29.268: INFO: Creating service... 
-Jul 29 17:09:29.288: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/DELETE -Jul 29 17:09:29.311: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE -Jul 29 17:09:29.311: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/GET -Jul 29 17:09:29.326: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET -Jul 29 17:09:29.326: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/HEAD -Jul 29 17:09:29.334: INFO: http.Client request:HEAD | StatusCode:200 -Jul 29 17:09:29.334: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/OPTIONS -Jul 29 17:09:29.341: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS -Jul 29 17:09:29.341: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/PATCH -Jul 29 17:09:29.347: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH -Jul 29 17:09:29.347: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/POST -Jul 29 17:09:29.354: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST -Jul 29 17:09:29.354: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/PUT -Jul 29 17:09:29.364: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT -Jul 29 17:09:29.364: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/DELETE -Jul 29 17:09:29.374: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE -Jul 29 17:09:29.374: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/GET -Jul 29 17:09:29.385: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET -Jul 29 17:09:29.385: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/HEAD -Jul 29 17:09:29.397: INFO: http.Client request:HEAD | StatusCode:200 -Jul 29 17:09:29.397: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/OPTIONS -Jul 29 17:09:29.409: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS -Jul 29 17:09:29.409: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/PATCH -Jul 29 17:09:29.422: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH -Jul 29 17:09:29.422: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/POST -Jul 29 17:09:29.433: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST -Jul 29 17:09:29.433: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/PUT -Jul 29 17:09:29.444: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT -[AfterEach] version v1 +[BeforeEach] [sig-storage] Projected downwardAPI + 
test/e2e/common/storage/projected_downwardapi.go:44 +[It] should provide podname only [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:53 +STEP: Creating a pod to test downward API volume plugin 08/24/23 13:21:40.314 +Aug 24 13:21:40.338: INFO: Waiting up to 5m0s for pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6" in namespace "projected-2124" to be "Succeeded or Failed" +Aug 24 13:21:40.374: INFO: Pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6": Phase="Pending", Reason="", readiness=false. Elapsed: 27.827302ms +Aug 24 13:21:42.388: INFO: Pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.042000862s +Aug 24 13:21:44.385: INFO: Pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.039157323s +STEP: Saw pod success 08/24/23 13:21:44.385 +Aug 24 13:21:44.385: INFO: Pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6" satisfied condition "Succeeded or Failed" +Aug 24 13:21:44.391: INFO: Trying to get logs from node pe9deep4seen-2 pod downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6 container client-container: +STEP: delete the pod 08/24/23 13:21:44.43 +Aug 24 13:21:44.455: INFO: Waiting for pod downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6 to disappear +Aug 24 13:21:44.462: INFO: Pod downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI test/e2e/framework/node/init/init.go:32 -Jul 29 17:09:29.445: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready -[DeferCleanup (Each)] version v1 +Aug 24 13:21:44.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:33 -[DeferCleanup (Each)] version v1 +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI dump namespaces | framework.go:196 -[DeferCleanup (Each)] version v1 +[DeferCleanup (Each)] [sig-storage] Projected downwardAPI tear down framework | framework.go:193 -STEP: Destroying namespace "proxy-4287" for this suite. 07/29/23 17:09:29.456 +STEP: Destroying namespace "projected-2124" for this suite. 
08/24/23 13:21:44.47 ------------------------------ -• [2.268 seconds] -[sig-network] Proxy -test/e2e/network/common/framework.go:23 - version v1 - test/e2e/network/proxy.go:74 - A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] - test/e2e/network/proxy.go:286 +• [4.237 seconds] +[sig-storage] Projected downwardAPI +test/e2e/common/storage/framework.go:23 + should provide podname only [NodeConformance] [Conformance] + test/e2e/common/storage/projected_downwardapi.go:53 Begin Captured GinkgoWriter Output >> - [BeforeEach] version v1 + [BeforeEach] [sig-storage] Projected downwardAPI set up framework | framework.go:178 - STEP: Creating a kubernetes client 07/29/23 17:09:27.197 - Jul 29 17:09:27.198: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396 - STEP: Building a namespace api object, basename proxy 07/29/23 17:09:27.199 - STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:27.229 - STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:27.234 - [BeforeEach] version v1 + STEP: Creating a kubernetes client 08/24/23 13:21:40.251 + Aug 24 13:21:40.251: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383 + STEP: Building a namespace api object, basename projected 08/24/23 13:21:40.255 + STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:40.286 + STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:40.3 + [BeforeEach] [sig-storage] Projected downwardAPI test/e2e/framework/metrics/init/init.go:31 - [It] A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] - test/e2e/network/proxy.go:286 - Jul 29 17:09:27.238: INFO: Creating pod... - Jul 29 17:09:27.256: INFO: Waiting up to 5m0s for pod "agnhost" in namespace "proxy-4287" to be "running" - Jul 29 17:09:27.261: INFO: Pod "agnhost": Phase="Pending", Reason="", readiness=false. Elapsed: 5.003526ms - Jul 29 17:09:29.268: INFO: Pod "agnhost": Phase="Running", Reason="", readiness=true. Elapsed: 2.011842727s - Jul 29 17:09:29.268: INFO: Pod "agnhost" satisfied condition "running" - Jul 29 17:09:29.268: INFO: Creating service... 
- Jul 29 17:09:29.288: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/DELETE - Jul 29 17:09:29.311: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE - Jul 29 17:09:29.311: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/GET - Jul 29 17:09:29.326: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET - Jul 29 17:09:29.326: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/HEAD - Jul 29 17:09:29.334: INFO: http.Client request:HEAD | StatusCode:200 - Jul 29 17:09:29.334: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/OPTIONS - Jul 29 17:09:29.341: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS - Jul 29 17:09:29.341: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/PATCH - Jul 29 17:09:29.347: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH - Jul 29 17:09:29.347: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/POST - Jul 29 17:09:29.354: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST - Jul 29 17:09:29.354: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/pods/agnhost/proxy/some/path/with/PUT - Jul 29 17:09:29.364: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT - Jul 29 17:09:29.364: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/DELETE - Jul 29 17:09:29.374: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE - Jul 29 17:09:29.374: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/GET - Jul 29 17:09:29.385: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET - Jul 29 17:09:29.385: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/HEAD - Jul 29 17:09:29.397: INFO: http.Client request:HEAD | StatusCode:200 - Jul 29 17:09:29.397: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/OPTIONS - Jul 29 17:09:29.409: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS - Jul 29 17:09:29.409: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/PATCH - Jul 29 17:09:29.422: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH - Jul 29 17:09:29.422: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/POST - Jul 29 17:09:29.433: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST - Jul 29 17:09:29.433: INFO: Starting http.Client for https://10.233.0.1:443/api/v1/namespaces/proxy-4287/services/test-service/proxy/some/path/with/PUT - Jul 29 17:09:29.444: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT - [AfterEach] version v1 + [BeforeEach] [sig-storage] Projected downwardAPI + 
test/e2e/common/storage/projected_downwardapi.go:44
+ [It] should provide podname only [NodeConformance] [Conformance]
+ test/e2e/common/storage/projected_downwardapi.go:53
+ STEP: Creating a pod to test downward API volume plugin 08/24/23 13:21:40.314
+ Aug 24 13:21:40.338: INFO: Waiting up to 5m0s for pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6" in namespace "projected-2124" to be "Succeeded or Failed"
+ Aug 24 13:21:40.374: INFO: Pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6": Phase="Pending", Reason="", readiness=false. Elapsed: 27.827302ms
+ Aug 24 13:21:42.388: INFO: Pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.042000862s
+ Aug 24 13:21:44.385: INFO: Pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.039157323s
+ STEP: Saw pod success 08/24/23 13:21:44.385
+ Aug 24 13:21:44.385: INFO: Pod "downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6" satisfied condition "Succeeded or Failed"
+ Aug 24 13:21:44.391: INFO: Trying to get logs from node pe9deep4seen-2 pod downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6 container client-container:
+ STEP: delete the pod 08/24/23 13:21:44.43
+ Aug 24 13:21:44.455: INFO: Waiting for pod downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6 to disappear
+ Aug 24 13:21:44.462: INFO: Pod downwardapi-volume-7ddd0f9e-b645-469d-a906-b90138fa08e6 no longer exists
+ [AfterEach] [sig-storage] Projected downwardAPI
test/e2e/framework/node/init/init.go:32
- Jul 29 17:09:29.445: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] version v1
+ Aug 24 13:21:44.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-storage] Projected downwardAPI
test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] version v1
+ [DeferCleanup (Each)] [sig-storage] Projected downwardAPI
dump namespaces | framework.go:196
- [DeferCleanup (Each)] version v1
+ [DeferCleanup (Each)] [sig-storage] Projected downwardAPI
tear down framework | framework.go:193
- STEP: Destroying namespace "proxy-4287" for this suite. 07/29/23 17:09:29.456
+ STEP: Destroying namespace "projected-2124" for this suite. 08/24/23 13:21:44.47
<< End Captured GinkgoWriter Output
------------------------------
-SSSSSSS
+SSSSSSSSSS
------------------------------
-[sig-storage] CSIStorageCapacity
- should support CSIStorageCapacities API operations [Conformance]
- test/e2e/storage/csistoragecapacity.go:49
-[BeforeEach] [sig-storage] CSIStorageCapacity
+[sig-apps] DisruptionController
+ should observe PodDisruptionBudget status updated [Conformance]
+ test/e2e/apps/disruption.go:141
+[BeforeEach] [sig-apps] DisruptionController
set up framework | framework.go:178
-STEP: Creating a kubernetes client 07/29/23 17:09:29.467
-Jul 29 17:09:29.467: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
-STEP: Building a namespace api object, basename csistoragecapacity 07/29/23 17:09:29.468
-STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:29.504
-STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:29.519
-[BeforeEach] [sig-storage] CSIStorageCapacity
+STEP: Creating a kubernetes client 08/24/23 13:21:44.492
+Aug 24 13:21:44.492: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+STEP: Building a namespace api object, basename disruption 08/24/23 13:21:44.495
+STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:44.556
+STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:44.565
+[BeforeEach] [sig-apps] DisruptionController
test/e2e/framework/metrics/init/init.go:31
-[It] should support CSIStorageCapacities API operations [Conformance]
- test/e2e/storage/csistoragecapacity.go:49
-STEP: getting /apis 07/29/23 17:09:29.524
-STEP: getting /apis/storage.k8s.io 07/29/23 17:09:29.533
-STEP: getting /apis/storage.k8s.io/v1 07/29/23 17:09:29.535
-STEP: creating 07/29/23 17:09:29.537
-STEP: watching 07/29/23 17:09:29.593
-Jul 29 17:09:29.593: INFO: starting watch
-STEP: getting 07/29/23 17:09:29.603
-STEP: listing in namespace 07/29/23 17:09:29.61
-STEP: listing across namespaces 07/29/23 17:09:29.617
-STEP: patching 07/29/23 17:09:29.635
-STEP: updating 07/29/23 17:09:29.654
-Jul 29 17:09:29.664: INFO: waiting for watch events with expected annotations in namespace
-Jul 29 17:09:29.664: INFO: waiting for watch events with expected annotations across namespace
-STEP: deleting 07/29/23 17:09:29.665
-STEP: deleting a collection 07/29/23 17:09:29.697
-[AfterEach] [sig-storage] CSIStorageCapacity
+[BeforeEach] [sig-apps] DisruptionController
+ test/e2e/apps/disruption.go:72
+[It] should observe PodDisruptionBudget status updated [Conformance]
+ test/e2e/apps/disruption.go:141
+STEP: Waiting for the pdb to be processed 08/24/23 13:21:44.585
+STEP: Waiting for all pods to be running 08/24/23 13:21:46.664
+Aug 24 13:21:46.724: INFO: running pods: 0 < 3
+Aug 24 13:21:48.783: INFO: running pods: 0 < 3
+[AfterEach] [sig-apps] DisruptionController
test/e2e/framework/node/init/init.go:32
-Jul 29 17:09:29.725: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
-[DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
+Aug 24 13:21:50.739: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+[DeferCleanup (Each)] [sig-apps] DisruptionController
test/e2e/framework/metrics/init/init.go:33
-[DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
+[DeferCleanup (Each)] [sig-apps] DisruptionController
dump namespaces | framework.go:196
-[DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
+[DeferCleanup (Each)] [sig-apps] DisruptionController
tear down framework | framework.go:193
-STEP: Destroying namespace "csistoragecapacity-1995" for this suite. 07/29/23 17:09:29.733
+STEP: Destroying namespace "disruption-1681" for this suite. 08/24/23 13:21:50.752
------------------------------
-• [0.276 seconds]
-[sig-storage] CSIStorageCapacity
-test/e2e/storage/utils/framework.go:23
- should support CSIStorageCapacities API operations [Conformance]
- test/e2e/storage/csistoragecapacity.go:49
+• [SLOW TEST] [6.272 seconds]
+[sig-apps] DisruptionController
+test/e2e/apps/framework.go:23
+ should observe PodDisruptionBudget status updated [Conformance]
+ test/e2e/apps/disruption.go:141
Begin Captured GinkgoWriter Output >>
- [BeforeEach] [sig-storage] CSIStorageCapacity
+ [BeforeEach] [sig-apps] DisruptionController
set up framework | framework.go:178
- STEP: Creating a kubernetes client 07/29/23 17:09:29.467
- Jul 29 17:09:29.467: INFO: >>> kubeConfig: /tmp/kubeconfig-3177299396
- STEP: Building a namespace api object, basename csistoragecapacity 07/29/23 17:09:29.468
- STEP: Waiting for a default service account to be provisioned in namespace 07/29/23 17:09:29.504
- STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 07/29/23 17:09:29.519
- [BeforeEach] [sig-storage] CSIStorageCapacity
+ STEP: Creating a kubernetes client 08/24/23 13:21:44.492
+ Aug 24 13:21:44.492: INFO: >>> kubeConfig: /tmp/kubeconfig-2729572383
+ STEP: Building a namespace api object, basename disruption 08/24/23 13:21:44.495
+ STEP: Waiting for a default service account to be provisioned in namespace 08/24/23 13:21:44.556
+ STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 08/24/23 13:21:44.565
+ [BeforeEach] [sig-apps] DisruptionController
test/e2e/framework/metrics/init/init.go:31
- [It] should support CSIStorageCapacities API operations [Conformance]
- test/e2e/storage/csistoragecapacity.go:49
- STEP: getting /apis 07/29/23 17:09:29.524
- STEP: getting /apis/storage.k8s.io 07/29/23 17:09:29.533
- STEP: getting /apis/storage.k8s.io/v1 07/29/23 17:09:29.535
- STEP: creating 07/29/23 17:09:29.537
- STEP: watching 07/29/23 17:09:29.593
- Jul 29 17:09:29.593: INFO: starting watch
- STEP: getting 07/29/23 17:09:29.603
- STEP: listing in namespace 07/29/23 17:09:29.61
- STEP: listing across namespaces 07/29/23 17:09:29.617
- STEP: patching 07/29/23 17:09:29.635
- STEP: updating 07/29/23 17:09:29.654
- Jul 29 17:09:29.664: INFO: waiting for watch events with expected annotations in namespace
- Jul 29 17:09:29.664: INFO: waiting for watch events with expected annotations across namespace
- STEP: deleting 07/29/23 17:09:29.665
- STEP: deleting a collection 07/29/23 17:09:29.697
- [AfterEach] [sig-storage] CSIStorageCapacity
+ [BeforeEach] [sig-apps] DisruptionController
+ test/e2e/apps/disruption.go:72
+ [It] should observe PodDisruptionBudget status updated [Conformance]
+ test/e2e/apps/disruption.go:141
+ STEP: Waiting for the pdb to be processed 08/24/23 13:21:44.585
+ STEP: Waiting for all pods to be running 08/24/23 13:21:46.664
+ Aug 24 13:21:46.724: INFO: running pods: 0 < 3
+ Aug 24 13:21:48.783: INFO: running pods: 0 < 3
+ [AfterEach] [sig-apps] DisruptionController
test/e2e/framework/node/init/init.go:32
- Jul 29 17:09:29.725: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
- [DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
+ Aug 24 13:21:50.739: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+ [DeferCleanup (Each)] [sig-apps] DisruptionController
test/e2e/framework/metrics/init/init.go:33
- [DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
+ [DeferCleanup (Each)] [sig-apps] DisruptionController
dump namespaces | framework.go:196
- [DeferCleanup (Each)] [sig-storage] CSIStorageCapacity
+ [DeferCleanup (Each)] [sig-apps] DisruptionController
tear down framework | framework.go:193
- STEP: Destroying namespace "csistoragecapacity-1995" for this suite. 07/29/23 17:09:29.733
+ STEP: Destroying namespace "disruption-1681" for this suite. 08/24/23 13:21:50.752
<< End Captured GinkgoWriter Output
------------------------------
-SSSS
+SSSSSSS
------------------------------
[SynchronizedAfterSuite]
test/e2e/e2e.go:88
@@ -37968,10 +37880,10 @@ test/e2e/e2e.go:88
test/e2e/e2e.go:88
[SynchronizedAfterSuite] TOP-LEVEL
test/e2e/e2e.go:88
-Jul 29 17:09:29.747: INFO: Running AfterSuite actions on node 1
-Jul 29 17:09:29.747: INFO: Skipping dumping logs from cluster
+Aug 24 13:21:50.777: INFO: Running AfterSuite actions on node 1
+Aug 24 13:21:50.777: INFO: Skipping dumping logs from cluster
------------------------------
-[SynchronizedAfterSuite] PASSED [0.000 seconds]
+[SynchronizedAfterSuite] PASSED [0.001 seconds]
[SynchronizedAfterSuite]
test/e2e/e2e.go:88
@@ -37980,8 +37892,8 @@ test/e2e/e2e.go:88
test/e2e/e2e.go:88
[SynchronizedAfterSuite] TOP-LEVEL
test/e2e/e2e.go:88
- Jul 29 17:09:29.747: INFO: Running AfterSuite actions on node 1
- Jul 29 17:09:29.747: INFO: Skipping dumping logs from cluster
+ Aug 24 13:21:50.777: INFO: Running AfterSuite actions on node 1
+ Aug 24 13:21:50.777: INFO: Skipping dumping logs from cluster
<< End Captured GinkgoWriter Output
------------------------------
[ReportAfterSuite] Kubernetes e2e suite report
@@ -38003,7 +37915,7 @@ test/e2e/framework/test_context.go:529
[ReportAfterSuite] TOP-LEVEL
test/e2e/framework/test_context.go:529
------------------------------
-[ReportAfterSuite] PASSED [0.203 seconds]
+[ReportAfterSuite] PASSED [0.188 seconds]
[ReportAfterSuite] Kubernetes e2e JUnit report
test/e2e/framework/test_context.go:529
@@ -38013,11 +37925,11 @@ test/e2e/framework/test_context.go:529
<< End Captured GinkgoWriter Output
------------------------------
-Ran 368 of 7069 Specs in 5967.296 seconds
+Ran 368 of 7069 Specs in 6157.505 seconds
SUCCESS! -- 368 Passed | 0 Failed | 0 Pending | 6701 Skipped
PASS
-Ginkgo ran 1 suite in 1h39m28.232142255s
+Ginkgo ran 1 suite in 1h42m38.41461006s
Test Suite Passed
You're using deprecated Ginkgo functionality:
=============================================
diff --git a/v1.26/alvistack-vagrant-kubernetes/junit_01.xml b/v1.26/alvistack-vagrant-kubernetes/junit_01.xml
index d554fea5d3..9d2994edce 100644
--- a/v1.26/alvistack-vagrant-kubernetes/junit_01.xml
+++ b/v1.26/alvistack-vagrant-kubernetes/junit_01.xml
@@ -1,12 +1,12 @@
@@ -21,20479 +21,20479 @@
[junit_01.xml hunk bodies not recoverable: the XML element content of the updated testsuite header and the 20,479-line run of testcase entries was stripped during extraction, leaving only bare "-"/"+" markers]
\ No newline at end of file