go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-apps\]\sStatefulSet\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sBurst\sscaling\sshould\srun\sto\scompletion\seven\swith\sunhealthy\spods\s\[Slow\]\s\[Conformance\]$'
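The focused spec is the StatefulSet burst-scaling conformance test in test/e2e/apps/statefulset.go. It makes a pod "unhealthy" by breaking the webserver container's HTTP readiness probe: the repeated `kubectl exec ss-0 -- /bin/sh -x -c 'mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true'` calls in the log below move Apache's index.html out of the document root so the probe starts failing, and a matching `mv` back restores it before the scale-up. A minimal sketch of that step, assuming ExecInStatefulPods keeps the (client, StatefulSet, command) shape visible in the stack traces; `breakHTTPProbeSketch` and `httpProbeFile` are illustrative names, not the test's actual code:

```go
// Hypothetical sketch of how the test makes ss-0 fail its readiness probe.
// breakHTTPProbe and ExecInStatefulPods are the helpers named in the stack
// traces below; their exact signatures are assumed here, not verified.
package apps

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
)

// httpProbeFile is the file served by the httpd-based webserver image and
// probed by the readiness check (assumption based on the mv command in the log).
const httpProbeFile = "index.html"

// breakHTTPProbeSketch moves index.html out of the Apache document root in the
// StatefulSet's pods, so the HTTP readiness probe starts returning failures.
func breakHTTPProbeSketch(c clientset.Interface, ss *appsv1.StatefulSet) error {
	path := "/usr/local/apache2/htdocs/" + httpProbeFile
	cmd := fmt.Sprintf("mv -v %s /tmp/ || true", path)
	return e2estatefulset.ExecInStatefulPods(c, ss, cmd)
}
```

The `|| true` keeps the shell's exit status zero even when the file has already been moved, so only transport-level problems make kubectl return rc 1; that is exactly what happens in this run, where the exec keeps failing with "container not found" and konnectivity "error dialing backend" errors rather than anything from the mv itself.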
test/e2e/framework/statefulset/rest.go:69
k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x801de88, 0xc001bcc1a0}, 0xc0053b4f00)
	test/e2e/framework/statefulset/rest.go:69 +0x153
k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1()
	test/e2e/framework/statefulset/wait.go:37 +0x4a
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1b
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0xc004b669d0?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 +0x57
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 +0x10c
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 +0x9a
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 +0x4a
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?)
	vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 +0x50
k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00)
	test/e2e/framework/statefulset/wait.go:35 +0xbd
k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...)
	test/e2e/framework/statefulset/wait.go:80
k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11()
	test/e2e/apps/statefulset.go:719 +0x3d0

There were additional failures detected after the initial failure:

[FAILED] Nov 26 03:37:06.638: Get "https://34.168.169.190/apis/apps/v1/namespaces/statefulset-7967/statefulsets": dial tcp 34.168.169.190:443: connect: connection refused
In [AfterEach] at: test/e2e/framework/statefulset/rest.go:76

----------

[FAILED] Nov 26 03:37:06.717: failed to list events in namespace "statefulset-7967": Get "https://34.168.169.190/api/v1/namespaces/statefulset-7967/events": dial tcp 34.168.169.190:443: connect: connection refused
In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44

----------

[FAILED] Nov 26 03:37:06.757: Couldn't delete ns: "statefulset-7967": Delete "https://34.168.169.190/api/v1/namespaces/statefulset-7967": dial tcp 34.168.169.190:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.169.190/api/v1/namespaces/statefulset-7967", Err:(*net.OpError)(0xc004a7ba40)})
In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370

from junit_01.xml
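The initiating failure at the top of the stack comes from GetPodList inside WaitForRunning: the framework polls the apiserver for the set's pods until three of them are Running and Ready (the `0x3, 0x3` arguments in the WaitForRunning frame), and the poll dies as soon as a List call fails, which is what happens once https://34.168.169.190 starts refusing connections. A rough sketch of that polling shape, using the wait.PollImmediate helper visible in the trace; this is not the framework's actual implementation, the names are illustrative, and the condition below returns the List error instead of failing the spec directly the way GetPodList does:

```go
// Illustrative sketch of the WaitForRunning-style poll seen in the stack trace:
// PollImmediate keeps re-listing the StatefulSet's pods until enough of them
// are Running and Ready, and aborts on the first List error.
package sketch

import (
	"context"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

func waitForRunningSketch(c clientset.Interface, want int32, ss *appsv1.StatefulSet) error {
	opts := metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(ss.Spec.Selector)}
	return wait.PollImmediate(10*time.Second, 10*time.Minute, func() (bool, error) {
		pods, err := c.CoreV1().Pods(ss.Namespace).List(context.TODO(), opts)
		if err != nil {
			// A failed List (for example "connection refused") ends the poll with
			// an error; in the real framework this surfaces as the spec failure above.
			return false, err
		}
		ready := int32(0)
		for i := range pods.Items {
			p := pods.Items[i]
			if p.Status.Phase != v1.PodRunning {
				continue
			}
			for _, cond := range p.Status.Conditions {
				if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
					ready++
					break
				}
			}
		}
		return ready >= want, nil
	})
}
```

The three "additional failures" that follow (listing StatefulSets, listing events, deleting the namespace) all hit the same "connect: connection refused" against 34.168.169.190, which suggests a single apiserver outage during cleanup rather than separate problems.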
[BeforeEach] [sig-apps] StatefulSet
  set up framework | framework.go:178
STEP: Creating a kubernetes client 11/26/22 03:19:45.644
Nov 26 03:19:45.644: INFO: >>> kubeConfig: /workspace/.kube/config
STEP: Building a namespace api object, basename statefulset 11/26/22 03:19:45.645
STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 03:19:45.807
STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 03:19:45.905
[BeforeEach] [sig-apps] StatefulSet
  test/e2e/framework/metrics/init/init.go:31
[BeforeEach] [sig-apps] StatefulSet
  test/e2e/apps/statefulset.go:98
[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic]
  test/e2e/apps/statefulset.go:113
STEP: Creating service test in namespace statefulset-7967 11/26/22 03:19:45.992
[It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]
  test/e2e/apps/statefulset.go:697
STEP: Creating stateful set ss in namespace statefulset-7967 11/26/22 03:19:46.07
STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-7967 11/26/22 03:19:46.124
Nov 26 03:19:46.176: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Pending - Ready=false
Nov 26 03:19:56.235: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Nov 26 03:20:06.232: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Nov 26 03:20:16.255: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Nov 26 03:20:26.256: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false
Nov 26 03:20:36.226: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod 11/26/22 03:20:36.227
Nov 26 03:20:36.289: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true'
Nov 26 03:20:36.799: INFO: rc: 1
Nov 26 03:20:36.799: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true:
Command stdout:
stderr:
error: unable to upgrade connection: container not found ("webserver")
error: exit status 1
Nov 26 03:20:46.800: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true'
Nov 26 03:20:47.245: INFO: rc: 1
Nov 26 03:20:47.245: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true:
Command stdout:
stderr:
error: unable to upgrade connection: container not found ("webserver")
error: exit status 1
Nov 26 03:20:57.245: INFO: Running
'/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:20:57.825: INFO: rc: 1 Nov 26 03:20:57.825: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:21:07.826: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:21:08.436: INFO: rc: 1 Nov 26 03:21:08.436: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:21:18.437: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:21:19.077: INFO: rc: 1 Nov 26 03:21:19.077: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:21:29.077: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:21:29.607: INFO: rc: 1 Nov 26 03:21:29.607: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:21:39.608: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:21:40.125: INFO: rc: 1 Nov 26 03:21:40.125: INFO: Waiting 10s to 
retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:21:50.126: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:21:50.593: INFO: rc: 1 Nov 26 03:21:50.594: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:22:00.595: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:22:01.020: INFO: rc: 1 Nov 26 03:22:01.020: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:22:11.020: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:22:41.455: INFO: rc: 1 Nov 26 03:22:41.455: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: Error from server: error dialing backend: context deadline exceeded: connection error: desc = "transport: Error while dialing dial unix /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket: connect: no such file or directory" error: exit status 1 Nov 26 03:22:51.456: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:23:21.874: INFO: rc: 1 Nov 26 03:23:21.874: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config 
--namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: Error from server: error dialing backend: context deadline exceeded: connection error: desc = "transport: Error while dialing dial unix /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket: connect: no such file or directory" error: exit status 1 Nov 26 03:23:31.875: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:24:02.267: INFO: rc: 1 Nov 26 03:24:02.267: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: Error from server: error dialing backend: context deadline exceeded: connection error: desc = "transport: Error while dialing dial unix /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket: connect: no such file or directory" error: exit status 1 Nov 26 03:24:12.267: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:24:42.692: INFO: rc: 1 Nov 26 03:24:42.692: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: Error from server: error dialing backend: context deadline exceeded: connection error: desc = "transport: Error while dialing dial unix /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket: connect: no such file or directory" error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 5m0.426s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 5m0.001s) test/e2e/apps/statefulset.go:697 At [By Step] Confirming that stateful set scale up will not halt with unhealthy stateful pod (Step Runtime: 4m9.844s) test/e2e/apps/statefulset.go:710 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc004b66760, 0x10}, {0xc004b6674c, 0x4}, {0xc004c5e1c0, 0x38}, 0xc0015486d0?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc004c5e1c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.breakHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1704 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:711 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:24:52.693: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 5m20.428s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 5m20.003s) test/e2e/apps/statefulset.go:697 At [By Step] Confirming that stateful set scale up will not halt with unhealthy stateful pod (Step Runtime: 4m29.846s) test/e2e/apps/statefulset.go:710 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc002026000?, 0x0?}) test/e2e/framework/kubectl/builder.go:125 k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...) test/e2e/framework/kubectl/builder.go:107 k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc004b66760?, 0x4?}, {0xc000a7d908?, 0x29?, 0xc000a7d8c8?}) test/e2e/framework/kubectl/builder.go:154 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...) test/e2e/framework/pod/output/output.go:82 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc004b66760, 0x10}, {0xc004b6674c, 0x4}, {0xc004c5e1c0, 0x38}, 0xc0015486d0?, 0x45d964b800) test/e2e/framework/pod/output/output.go:105 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc004c5e1c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.breakHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1704 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:711 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:25:18.193: INFO: rc: 1 Nov 26 03:25:18.193: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 5m40.431s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 5m40.005s) test/e2e/apps/statefulset.go:697 At [By Step] Confirming that stateful set scale up will not halt with unhealthy stateful pod (Step Runtime: 4m49.848s) test/e2e/apps/statefulset.go:710 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc004b66760, 0x10}, {0xc004b6674c, 0x4}, {0xc004c5e1c0, 0x38}, 0xc0015486d0?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc004c5e1c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.breakHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1704 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:711 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:25:28.193: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:25:28.684: INFO: rc: 1 Nov 26 03:25:28.684: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:25:38.684: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' Nov 26 03:25:39.131: INFO: rc: 1 Nov 26 03:25:39.131: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: Nov 26 03:25:39.525: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false Nov 26 03:25:39.525: INFO: Waiting for statefulset status.replicas updated to 0 Nov 26 03:25:40.155: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 03:25:40.155: INFO: ss-0 bootstrap-e2e-minion-group-7rps Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:46 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:21:33 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:21:33 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:46 +0000 UTC }] Nov 26 03:25:40.155: INFO: Nov 26 03:25:40.155: INFO: StatefulSet ss has not reached scale 3, at 1 Nov 26 03:25:41.292: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.759113397s Nov 26 03:25:42.372: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.622622596s Nov 26 03:25:43.464: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.541684665s Nov 26 03:25:44.533: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.449737673s Nov 26 03:25:45.687: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.381423766s ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 6m0.432s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 6m0.007s) 
test/e2e/apps/statefulset.go:697 At [By Step] Confirming that stateful set scale up will not halt with unhealthy stateful pod (Step Runtime: 5m9.85s) test/e2e/apps/statefulset.go:710 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x3b9aca00) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/apps.confirmStatefulPodCount({0x801de88, 0xc001bcc1a0}, 0x3, 0xc0053b4f00, 0x0?, 0x0) test/e2e/apps/statefulset.go:1685 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:715 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:25:46.753: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.226602703s Nov 26 03:25:47.811: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.161586423s Nov 26 03:25:48.884: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.102591013s Nov 26 03:25:49.940: INFO: Verifying statefulset ss doesn't scale past 3 for another 30.552615ms STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 11/26/22 03:25:50.94 Nov 26 03:25:50.995: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:25:51.586: INFO: rc: 1 Nov 26 03:25:51.586: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:26:01.586: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:26:01.979: INFO: rc: 1 Nov 26 03:26:01.979: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 6m20.435s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 6m20.009s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and 
waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 15.139s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:26:11.980: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:26:12.339: INFO: rc: 1 Nov 26 03:26:12.339: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:26:22.339: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:26:22.681: INFO: rc: 1 Nov 26 03:26:22.681: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 6m40.437s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 6m40.011s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 35.141s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 
k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:26:32.682: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:26:33.018: INFO: rc: 1 Nov 26 03:26:33.018: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:26:43.019: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:26:43.402: INFO: rc: 1 Nov 26 03:26:43.402: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 7m0.439s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 7m0.013s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 55.143s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 
k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:26:53.402: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:26:54.104: INFO: rc: 1 Nov 26 03:26:54.104: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:27:04.105: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:27:04.756: INFO: rc: 1 Nov 26 03:27:04.756: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 7m20.441s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 7m20.015s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 1m15.145s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > 
k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:27:14.756: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:27:15.520: INFO: rc: 1 Nov 26 03:27:15.520: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:27:25.521: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:27:26.012: INFO: rc: 1 Nov 26 03:27:26.012: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 7m40.443s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 7m40.017s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 1m35.147s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:27:36.013: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:27:36.622: INFO: rc: 1 Nov 26 03:27:36.622: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 8m0.445s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 8m0.019s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 1m55.149s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:27:46.623: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:27:47.128: INFO: rc: 1 Nov 26 03:27:47.128: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:27:57.128: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:27:57.577: INFO: rc: 1 Nov 26 03:27:57.577: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 8m20.447s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 8m20.021s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 2m15.151s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:28:07.578: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:28:08.157: INFO: rc: 1 Nov 26 03:28:08.157: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:28:18.158: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:28:18.669: INFO: rc: 1 Nov 26 03:28:18.669: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 8m40.449s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 8m40.024s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 2m35.153s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:28:28.671: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:28:29.553: INFO: rc: 1 Nov 26 03:28:29.553: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:28:39.553: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:28:40.134: INFO: rc: 1 Nov 26 03:28:40.134: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 9m0.453s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 9m0.027s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 2m55.157s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:28:50.135: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:28:50.672: INFO: rc: 1 Nov 26 03:28:50.673: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:29:00.673: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:29:01.146: INFO: rc: 1 Nov 26 03:29:01.146: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 9m20.456s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 9m20.03s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 3m15.16s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:29:11.146: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:29:11.831: INFO: rc: 1 Nov 26 03:29:11.831: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:29:21.838: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:29:22.277: INFO: rc: 1 Nov 26 03:29:22.277: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 9m40.458s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 9m40.032s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 3m35.162s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:29:32.278: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:29:32.778: INFO: rc: 1 Nov 26 03:29:32.779: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:29:42.779: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:29:43.274: INFO: rc: 1 Nov 26 03:29:43.275: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 10m0.461s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 10m0.035s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 3m55.165s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:29:53.275: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:29:53.710: INFO: rc: 1 Nov 26 03:29:53.710: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:30:03.711: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:30:04.094: INFO: rc: 1 Nov 26 03:30:04.094: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 10m20.463s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 10m20.037s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 4m15.167s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:30:14.095: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:30:14.448: INFO: rc: 1 Nov 26 03:30:14.449: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 Nov 26 03:30:24.449: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:30:25.223: INFO: rc: 1 Nov 26 03:30:25.223: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 10m40.465s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 10m40.039s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 4m35.169s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:30:35.223: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:30:35.608: INFO: rc: 1 Nov 26 03:30:35.608: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 Nov 26 03:30:45.609: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:30:46.013: INFO: rc: 1 Nov 26 03:30:46.013: INFO: Waiting 10s to retry failed RunHostCmd: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true: Command stdout: stderr: error: unable to upgrade connection: container not found ("webserver") error: exit status 1 ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 11m0.467s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 11m0.042s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 4m55.171s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [sleep] time.Sleep(0x2540be400) /usr/local/go/src/runtime/time.go:195 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmdWithRetries({0xc0045b3160, 0x10}, {0xc0045b314c, 0x4}, {0xc0037795c0, 0x38}, 0xc000244190?, 0x45d964b800) test/e2e/framework/pod/output/output.go:113 k8s.io/kubernetes/test/e2e/framework/statefulset.ExecInStatefulPods({0x801de88?, 0xc001bcc1a0?}, 0xc000a7de88?, {0xc0037795c0, 0x38}) test/e2e/framework/statefulset/rest.go:240 > k8s.io/kubernetes/test/e2e/apps.restoreHTTPProbe({0x801de88, 0xc001bcc1a0}, 0x0?) 
test/e2e/apps/statefulset.go:1728 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:718 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:30:56.014: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=statefulset-7967 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' Nov 26 03:30:56.390: INFO: rc: 1 Nov 26 03:30:56.390: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: Nov 26 03:30:56.517: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 11m20.469s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 11m20.044s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 5m15.174s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:31:06.588: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:31:16.577: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 11m40.472s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 11m40.047s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 5m35.176s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:31:26.582: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:31:36.595: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 12m0.474s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 12m0.048s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 5m55.178s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:31:46.583: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:31:56.575: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 12m20.476s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 12m20.051s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 6m15.181s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:32:06.566: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:32:16.598: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 12m40.478s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 12m40.052s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 6m35.182s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:32:26.576: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:32:36.604: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 13m0.48s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 13m0.055s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 6m55.185s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:32:46.580: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:32:56.612: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 13m20.483s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 13m20.057s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 7m15.187s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:33:06.569: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:33:16.581: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 13m40.486s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 13m40.061s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 7m35.19s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:33:26.582: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:33:36.572: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 14m0.489s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 14m0.064s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 7m55.194s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:33:46.584: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:33:56.583: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 14m20.492s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 14m20.067s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 8m15.196s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:34:06.571: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:34:16.586: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 14m40.495s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 14m40.069s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 8m35.199s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:34:26.701: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:34:36.569: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 15m0.497s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 15m0.071s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 8m55.201s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:34:46.600: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:34:56.590: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 15m20.499s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 15m20.073s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 9m15.203s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:35:06.561: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:35:16.598: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 15m40.501s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 15m40.075s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 9m35.205s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:35:26.564: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:35:36.589: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 16m0.503s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 16m0.077s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 9m55.207s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:35:46.561: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:35:56.580: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:35:56.580: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:35:56.580: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 16m20.505s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 16m20.079s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 10m15.209s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:36:06.563: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:06.563: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:06.563: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:36:16.581: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:16.581: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:16.581: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 16m40.507s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 16m40.081s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 10m35.211s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:36:26.584: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:26.584: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:26.584: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:36:36.575: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:36.575: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:36.575: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 17m0.509s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 17m0.083s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 10m55.213s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:36:46.616: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:46.616: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:46.616: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false Nov 26 03:36:56.605: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:56.605: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true Nov 26 03:36:56.605: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=false ------------------------------ Progress Report for Ginkgo Process #1 Automatically polling progress: [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] (Spec Runtime: 17m20.511s) test/e2e/apps/statefulset.go:697 In [It] (Node Runtime: 17m20.086s) test/e2e/apps/statefulset.go:697 At [By Step] Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-7967 (Step Runtime: 11m15.216s) test/e2e/apps/statefulset.go:717 Spec Goroutine goroutine 13224 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 > k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:37:06.558: INFO: Unexpected error: <*url.Error | 0xc004bccde0>: { Op: "Get", URL: "https://34.168.169.190/api/v1/namespaces/statefulset-7967/pods?labelSelector=baz%3Dblah%2Cfoo%3Dbar", Err: <*net.OpError | 0xc0049f1e00>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0053de930>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 169, 190], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc000a7eb20>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 03:37:06.558: FAIL: Get "https://34.168.169.190/api/v1/namespaces/statefulset-7967/pods?labelSelector=baz%3Dblah%2Cfoo%3Dbar": dial tcp 34.168.169.190:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x801de88, 0xc001bcc1a0}, 0xc0053b4f00) test/e2e/framework/statefulset/rest.go:69 +0x153 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1() test/e2e/framework/statefulset/wait.go:37 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1b k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0xc004b669d0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 +0x57 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 +0x10c k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 +0x9a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 +0x50 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 +0xbd k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 +0x3d0 E1126 03:37:06.558817 10064 runtime.go:79] Observed a panic: types.GinkgoError{Heading:"Your Test Panicked", Message:"When you, or your assertion library, calls Ginkgo's Fail(),\nGinkgo panics to prevent subsequent assertions from running.\n\nNormally Ginkgo rescues this panic so you shouldn't see it.\n\nHowever, if you make an assertion in a goroutine, Ginkgo can't capture the panic.\nTo circumvent this, you should call\n\n\tdefer GinkgoRecover()\n\nat the top of the goroutine that caused this panic.\n\nAlternatively, you may have made an assertion outside of a Ginkgo\nleaf node (e.g. in a container node or some out-of-band function) - please move your assertion to\nan appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).", DocLink:"mental-model-how-ginkgo-handles-failure", CodeLocation:types.CodeLocation{FileName:"test/e2e/framework/statefulset/rest.go", LineNumber:69, FullStackTrace:"k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x801de88, 0xc001bcc1a0}, 0xc0053b4f00)\n\ttest/e2e/framework/statefulset/rest.go:69 +0x153\nk8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1()\n\ttest/e2e/framework/statefulset/wait.go:37 +0x4a\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1b\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0xc004b669d0?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 +0x57\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 +0x10c\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 +0x9a\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 +0x4a\nk8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?)\n\tvendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 +0x50\nk8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00)\n\ttest/e2e/framework/statefulset/wait.go:35 +0xbd\nk8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...)\n\ttest/e2e/framework/statefulset/wait.go:80\nk8s.io/kubernetes/test/e2e/apps.glob..func10.2.11()\n\ttest/e2e/apps/statefulset.go:719 +0x3d0", CustomMessage:""}} (�[1m�[38;5;9mYour Test Panicked�[0m �[38;5;243mtest/e2e/framework/statefulset/rest.go:69�[0m When you, or your assertion library, calls Ginkgo's Fail(), Ginkgo panics to prevent subsequent assertions from running. Normally Ginkgo rescues this panic so you shouldn't see it. However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. To circumvent this, you should call defer GinkgoRecover() at the top of the goroutine that caused this panic. Alternatively, you may have made an assertion outside of a Ginkgo leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to an appropriate Ginkgo node (e.g. 
a BeforeSuite, BeforeEach, It, etc...). �[1mLearn more at:�[0m �[38;5;14m�[4mhttp://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure�[0m ) goroutine 13224 [running]: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/runtime.logPanic({0x70eb7e0?, 0xc000c32d90}) vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go:75 +0x99 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/runtime.HandleCrash({0x0, 0x0, 0xc000c32d90?}) vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go:49 +0x75 panic({0x70eb7e0, 0xc000c32d90}) /usr/local/go/src/runtime/panic.go:884 +0x212 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2.Fail({0xc0017ca600, 0xb8}, {0xc000ff15a8?, 0x75b521a?, 0xc000ff15c8?}) vendor/github.com/onsi/ginkgo/v2/core_dsl.go:352 +0x225 k8s.io/kubernetes/test/e2e/framework.Fail({0xc000344840, 0xa3}, {0xc000ff1640?, 0xc000344840?, 0xc000ff1668?}) test/e2e/framework/log.go:61 +0x145 k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fadf60, 0xc004bccde0}, {0x0?, 0xc0045b0250?, 0x10?}) test/e2e/framework/expect.go:76 +0x267 k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...) test/e2e/framework/expect.go:43 k8s.io/kubernetes/test/e2e/framework/statefulset.GetPodList({0x801de88, 0xc001bcc1a0}, 0xc0053b4f00) test/e2e/framework/statefulset/rest.go:69 +0x153 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning.func1() test/e2e/framework/statefulset/wait.go:37 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 +0x1b k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0000820c8?}, 0xc004b669d0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 +0x57 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0053641b0, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 +0x10c k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0xf8?, 0x2fd9d05?, 0x20?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 +0x9a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x1?, 0xc000a7de48?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 +0x4a k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x801de88?, 0xc001bcc1a0?, 0xc000a7de88?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 +0x50 k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunning({0x801de88?, 0xc001bcc1a0}, 0x3, 0x3, 0xc0053b4f00) test/e2e/framework/statefulset/wait.go:35 +0xbd k8s.io/kubernetes/test/e2e/framework/statefulset.WaitForRunningAndReady(...) 
test/e2e/framework/statefulset/wait.go:80 k8s.io/kubernetes/test/e2e/apps.glob..func10.2.11() test/e2e/apps/statefulset.go:719 +0x3d0 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0038fb200}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 +0x1b k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 +0x98 created by k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 +0xe3d [AfterEach] Basic StatefulSet functionality [StatefulSetBasic] test/e2e/apps/statefulset.go:124 Nov 26 03:37:06.598: INFO: Deleting all statefulset in ns statefulset-7967 Nov 26 03:37:06.638: INFO: Unexpected error: <*url.Error | 0xc004bcd380>: { Op: "Get", URL: "https://34.168.169.190/apis/apps/v1/namespaces/statefulset-7967/statefulsets", Err: <*net.OpError | 0xc004f50050>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0053dec00>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 169, 190], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc000a7ef60>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 03:37:06.638: FAIL: Get "https://34.168.169.190/apis/apps/v1/namespaces/statefulset-7967/statefulsets": dial tcp 34.168.169.190:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/statefulset.DeleteAllStatefulSets({0x801de88, 0xc001bcc1a0}, {0xc001d7d0e0, 0x10}) test/e2e/framework/statefulset/rest.go:76 +0x113 k8s.io/kubernetes/test/e2e/apps.glob..func10.2.2() test/e2e/apps/statefulset.go:129 +0x1b2 [AfterEach] [sig-apps] StatefulSet test/e2e/framework/node/init/init.go:32 Nov 26 03:37:06.638: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-apps] StatefulSet test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-apps] StatefulSet dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 03:37:06.677 STEP: Collecting events from namespace "statefulset-7967". 
11/26/22 03:37:06.678 Nov 26 03:37:06.717: INFO: Unexpected error: failed to list events in namespace "statefulset-7967": <*url.Error | 0xc004bcd8c0>: { Op: "Get", URL: "https://34.168.169.190/api/v1/namespaces/statefulset-7967/events", Err: <*net.OpError | 0xc004f503c0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc0005cea20>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 169, 190], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc000a7f2c0>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 03:37:06.717: FAIL: failed to list events in namespace "statefulset-7967": Get "https://34.168.169.190/api/v1/namespaces/statefulset-7967/events": dial tcp 34.168.169.190:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc000ff05c0, {0xc001d7d0e0, 0x10}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc001bcc1a0}, {0xc001d7d0e0, 0x10}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc000ff0650?, {0xc001d7d0e0?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc0003d94a0) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc001507b90?, 0xc0041f1fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0xc004a60d88?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc001507b90?, 0x29449fc?}, {0xae73300?, 0xc0041f1f80?, 0x0?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-apps] StatefulSet tear down framework | framework.go:193 STEP: Destroying namespace "statefulset-7967" for this suite. 11/26/22 03:37:06.718 Nov 26 03:37:06.757: FAIL: Couldn't delete ns: "statefulset-7967": Delete "https://34.168.169.190/api/v1/namespaces/statefulset-7967": dial tcp 34.168.169.190:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.169.190/api/v1/namespaces/statefulset-7967", Err:(*net.OpError)(0xc004a7ba40)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc0003d94a0) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc001507a40?, 0xc003e99fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc001507a40?, 0x0?}, {0xae73300?, 0x5?, 0xc002fe0c18?}) /usr/local/go/src/reflect/value.go:368 +0xbc
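The stack traces above repeat the same shape: statefulset.WaitForRunning drives wait.PollImmediate, and each iteration lists the StatefulSet's pods and checks that they are Running and Ready, until the API server stops answering and the list call fails with connection refused. The sketch below is a rough, hypothetical reconstruction of that polling pattern using client-go; it is not the e2e framework's actual code, and the helper name waitForStatefulSetPods, the kubeconfig path, and the namespace/label-selector values are illustrative only.

package main

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForStatefulSetPods polls until `want` pods matching the selector are
// Running and Ready, mirroring the PollImmediate loop visible in the traces.
// If the pod list itself fails (for example, connection refused because the
// API server is unreachable), the error ends the wait early; the e2e framework
// instead surfaces that error through ExpectNoError/Fail, as seen in the log.
func waitForStatefulSetPods(c kubernetes.Interface, ns, selector string, want int) error {
	return wait.PollImmediate(10*time.Second, 10*time.Minute, func() (bool, error) {
		pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			return false, err
		}
		ready := 0
		for _, p := range pods.Items {
			if p.Status.Phase != v1.PodRunning {
				continue
			}
			for _, cond := range p.Status.Conditions {
				if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
					ready++
					break
				}
			}
		}
		fmt.Printf("%d/%d pods running and ready\n", ready, want)
		return ready == want, nil
	})
}

func main() {
	// Illustrative kubeconfig path and test values; adjust for a real cluster.
	config, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)
	if err := waitForStatefulSetPods(client, "statefulset-7967", "baz=blah,foo=bar", 3); err != nil {
		fmt.Println("wait failed:", err)
	}
}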
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-auth\]\sServiceAccounts\sshould\ssupport\sInClusterConfig\swith\stoken\srotation\s\[Slow\]$'
test/e2e/auth/service_accounts.go:520 k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:520 +0x9ab
from junit_01.xml
[BeforeEach] [sig-auth] ServiceAccounts set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 03:04:09.709 Nov 26 03:04:09.709: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename svcaccounts 11/26/22 03:04:09.711 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 03:04:09.898 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 03:04:09.986 [BeforeEach] [sig-auth] ServiceAccounts test/e2e/framework/metrics/init/init.go:31 [It] should support InClusterConfig with token rotation [Slow] test/e2e/auth/service_accounts.go:432 Nov 26 03:04:10.230: INFO: created pod Nov 26 03:04:10.230: INFO: Waiting up to 1m0s for 1 pods to be running and ready: [inclusterclient] Nov 26 03:04:10.230: INFO: Waiting up to 1m0s for pod "inclusterclient" in namespace "svcaccounts-5795" to be "running and ready" Nov 26 03:04:10.300: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 70.222127ms Nov 26 03:04:10.300: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-8wdk' to be 'Running' but was 'Pending' Nov 26 03:04:12.341: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 2.111530411s Nov 26 03:04:12.341: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-8wdk' to be 'Running' but was 'Pending' Nov 26 03:04:14.343: INFO: Pod "inclusterclient": Phase="Pending", Reason="", readiness=false. Elapsed: 4.11321002s Nov 26 03:04:14.343: INFO: Error evaluating pod condition running and ready: want pod 'inclusterclient' on 'bootstrap-e2e-minion-group-8wdk' to be 'Running' but was 'Pending' Nov 26 03:04:16.343: INFO: Pod "inclusterclient": Phase="Running", Reason="", readiness=true. Elapsed: 6.112798065s Nov 26 03:04:16.343: INFO: Pod "inclusterclient" satisfied condition "running and ready" Nov 26 03:04:16.343: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [inclusterclient] Nov 26 03:04:16.343: INFO: pod is ready Nov 26 03:05:16.343: INFO: polling logs Nov 26 03:05:16.447: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 Nov 26 03:06:16.344: INFO: polling logs Nov 26 03:06:16.434: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 Nov 26 03:07:16.344: INFO: polling logs Nov 26 03:08:08.325: INFO: Error pulling logs: an error on the server ("unknown") has prevented the request from succeeding (get pods inclusterclient) Nov 26 03:08:16.343: INFO: polling logs Nov 26 03:08:16.399: INFO: Error pulling logs: an error on the server ("unknown") has prevented the request from succeeding (get pods inclusterclient) ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 5m0.409s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 5m0s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:09:16.344: INFO: polling logs Nov 26 03:09:16.452: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 5m20.411s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 5m20.003s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 5m40.413s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 5m40.004s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 6m0.415s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 6m0.006s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:10:16.344: INFO: polling logs Nov 26 03:10:16.460: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 6m20.417s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 6m20.009s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 6m40.42s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 6m40.012s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 7m0.422s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 7m0.013s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:11:16.343: INFO: polling logs Nov 26 03:11:16.387: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 7m20.429s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 7m20.021s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 7m40.432s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 7m40.023s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 8m0.434s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 8m0.025s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:12:16.344: INFO: polling logs Nov 26 03:12:16.388: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 8m20.436s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 8m20.027s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 8m40.437s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 8m40.029s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 9m0.439s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 9m0.031s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:13:16.343: INFO: polling logs Nov 26 03:13:16.446: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 9m20.442s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 9m20.033s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 9m40.445s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 9m40.036s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 10m0.447s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 10m0.038s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:14:16.344: INFO: polling logs Nov 26 03:14:16.623: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 10m20.451s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 10m20.042s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 10m40.453s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 10m40.044s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 11m0.455s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 11m0.047s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:15:16.343: INFO: polling logs ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 11m20.457s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 11m20.048s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000d01e00, 0xc000b13700) vendor/golang.org/x/net/http2/transport.go:1200 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000223880, 0xc000b13700, {0xe0?}) vendor/golang.org/x/net/http2/transport.go:519 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:480 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc001857b80?}, 0xc000b13700?) vendor/golang.org/x/net/http2/transport.go:3020 net/http.(*Transport).roundTrip(0xc001857b80, 0xc000b13700) /usr/local/go/src/net/http/transport.go:540 net/http.(*Transport).RoundTrip(0x6fe4b20?, 0xc003712690?) /usr/local/go/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*bearerAuthRoundTripper).RoundTrip(0xc00350f2c0, 0xc000b13100) vendor/k8s.io/client-go/transport/round_trippers.go:317 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc003649b60, 0xc000b12b00) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc000b12b00, {0x7fad100, 0xc003649b60}, {0x74d54e0?, 0x1?, 0x0?}) /usr/local/go/src/net/http/client.go:251 net/http.(*Client).send(0xc00350f2f0, 0xc000b12b00, {0x7f7a08e73108?, 0x100?, 0x0?}) /usr/local/go/src/net/http/client.go:175 net/http.(*Client).do(0xc00350f2f0, 0xc000b12b00) /usr/local/go/src/net/http/client.go:715 net/http.(*Client).Do(...) /usr/local/go/src/net/http/client.go:581 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc000b12100, {0x7fe0bc8, 0xc00012e008}, 0x7f79d95088f0?) vendor/k8s.io/client-go/rest/request.go:964 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc000b12100, {0x7fe0bc8, 0xc00012e008}) vendor/k8s.io/client-go/rest/request.go:1005 k8s.io/kubernetes/test/e2e/framework/pod.getPodLogsInternal({0x801de88?, 0xc001546680?}, {0xc0032e0a30, 0x10}, {0x75e13f6, 0xf}, {0x75e13f6, 0xf}, 0x0, 0x0, ...) test/e2e/framework/pod/resource.go:572 k8s.io/kubernetes/test/e2e/framework/pod.GetPodLogs(...) 
test/e2e/framework/pod/resource.go:543 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6.1() test/e2e/auth/service_accounts.go:505 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc00012e000?}, 0x262a61f?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:15:46.503: INFO: Error pulling logs: an error on the server ("unknown") has prevented the request from succeeding (get pods inclusterclient) ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 11m40.46s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 11m40.052s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 12m0.463s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 12m0.054s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:16:16.344: INFO: polling logs Nov 26 03:16:16.407: INFO: Error pulling logs: an error on the server ("unknown") has prevented the request from succeeding (get pods inclusterclient) ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 12m20.465s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 12m20.056s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 12m40.467s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 12m40.058s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 13m0.469s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 13m0.061s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:17:16.343: INFO: polling logs Nov 26 03:17:16.646: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 13m20.472s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 13m20.063s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 13m40.474s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 13m40.066s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 14m0.476s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 14m0.068s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:18:16.343: INFO: polling logs ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 14m20.479s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 14m20.07s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000d01e00, 0xc000b13700) vendor/golang.org/x/net/http2/transport.go:1200 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000223880, 0xc000b13700, {0xe0?}) vendor/golang.org/x/net/http2/transport.go:519 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:480 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc001857b80?}, 0xc000b13700?) vendor/golang.org/x/net/http2/transport.go:3020 net/http.(*Transport).roundTrip(0xc001857b80, 0xc000b13700) /usr/local/go/src/net/http/transport.go:540 net/http.(*Transport).RoundTrip(0x6fe4b20?, 0xc003712210?) 
/usr/local/go/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*bearerAuthRoundTripper).RoundTrip(0xc00350f2c0, 0xc000b13100) vendor/k8s.io/client-go/transport/round_trippers.go:317 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc003649b60, 0xc000b12b00) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc000b12b00, {0x7fad100, 0xc003649b60}, {0x74d54e0?, 0x1?, 0x0?}) /usr/local/go/src/net/http/client.go:251 net/http.(*Client).send(0xc00350f2f0, 0xc000b12b00, {0x7f7a08e73108?, 0x100?, 0x0?}) /usr/local/go/src/net/http/client.go:175 net/http.(*Client).do(0xc00350f2f0, 0xc000b12b00) /usr/local/go/src/net/http/client.go:715 net/http.(*Client).Do(...) /usr/local/go/src/net/http/client.go:581 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc000b12100, {0x7fe0bc8, 0xc00012e008}, 0x7f79d95088f0?) vendor/k8s.io/client-go/rest/request.go:964 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc000b12100, {0x7fe0bc8, 0xc00012e008}) vendor/k8s.io/client-go/rest/request.go:1005 k8s.io/kubernetes/test/e2e/framework/pod.getPodLogsInternal({0x801de88?, 0xc001546680?}, {0xc0032e0a30, 0x10}, {0x75e13f6, 0xf}, {0x75e13f6, 0xf}, 0x0, 0x0, ...) test/e2e/framework/pod/resource.go:572 k8s.io/kubernetes/test/e2e/framework/pod.GetPodLogs(...) test/e2e/framework/pod/resource.go:543 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6.1() test/e2e/auth/service_accounts.go:505 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc00012e000?}, 0x262a61f?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:18:46.423: INFO: Error pulling logs: an error on the server ("unknown") has prevented the request from succeeding (get pods inclusterclient) ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 14m40.481s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 14m40.073s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 15m0.484s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 15m0.075s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:19:16.343: INFO: polling logs Nov 26 03:19:16.500: INFO: Error pulling logs: an error on the server ("unknown") has prevented the request from succeeding (get pods inclusterclient) ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 15m20.486s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 15m20.077s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 15m40.488s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 15m40.079s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 16m0.489s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 16m0.081s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:20:16.344: INFO: polling logs Nov 26 03:20:16.450: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 16m20.491s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 16m20.083s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 16m40.493s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 16m40.085s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 17m0.495s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 17m0.086s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:21:16.344: INFO: polling logs Nov 26 03:21:16.408: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2 ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 17m20.497s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 17m20.089s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 17m40.5s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 17m40.091s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 18m0.502s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 18m0.093s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:22:16.343: INFO: polling logs ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 18m20.504s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 18m20.095s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000d01e00, 0xc0017c2600) vendor/golang.org/x/net/http2/transport.go:1200 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000223880, 0xc0017c2600, {0xe0?}) vendor/golang.org/x/net/http2/transport.go:519 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:480 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc001857b80?}, 0xc0017c2600?) vendor/golang.org/x/net/http2/transport.go:3020 net/http.(*Transport).roundTrip(0xc001857b80, 0xc0017c2600) /usr/local/go/src/net/http/transport.go:540 net/http.(*Transport).RoundTrip(0x6fe4b20?, 0xc003406330?) 
/usr/local/go/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*bearerAuthRoundTripper).RoundTrip(0xc00350f2c0, 0xc0017c2500) vendor/k8s.io/client-go/transport/round_trippers.go:317 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc003649b60, 0xc0017c2400) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc0017c2400, {0x7fad100, 0xc003649b60}, {0x74d54e0?, 0x1?, 0x0?}) /usr/local/go/src/net/http/client.go:251 net/http.(*Client).send(0xc00350f2f0, 0xc0017c2400, {0x7f7a08e735b8?, 0x100?, 0x0?}) /usr/local/go/src/net/http/client.go:175 net/http.(*Client).do(0xc00350f2f0, 0xc0017c2400) /usr/local/go/src/net/http/client.go:715 net/http.(*Client).Do(...) /usr/local/go/src/net/http/client.go:581 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0017c2200, {0x7fe0bc8, 0xc00012e008}, 0x7f79d9508d30?) vendor/k8s.io/client-go/rest/request.go:964 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0017c2200, {0x7fe0bc8, 0xc00012e008}) vendor/k8s.io/client-go/rest/request.go:1005 k8s.io/kubernetes/test/e2e/framework/pod.getPodLogsInternal({0x801de88?, 0xc001546680?}, {0xc0032e0a30, 0x10}, {0x75e13f6, 0xf}, {0x75e13f6, 0xf}, 0x0, 0x0, ...) test/e2e/framework/pod/resource.go:572 k8s.io/kubernetes/test/e2e/framework/pod.GetPodLogs(...) test/e2e/framework/pod/resource.go:543 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6.1() test/e2e/auth/service_accounts.go:505 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc00012e000?}, 0x262a61f?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:22:46.407: INFO: Error pulling logs: an error on the server ("unknown") has prevented the request from succeeding (get pods inclusterclient) ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 18m40.507s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 18m40.098s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 19m0.512s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 19m0.103s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:23:16.344: INFO: polling logs ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 19m20.514s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 19m20.105s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000d01e00, 0xc0017c2b00) vendor/golang.org/x/net/http2/transport.go:1200 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000223880, 0xc0017c2b00, {0xe0?}) vendor/golang.org/x/net/http2/transport.go:519 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:480 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc001857b80?}, 0xc0017c2b00?) vendor/golang.org/x/net/http2/transport.go:3020 net/http.(*Transport).roundTrip(0xc001857b80, 0xc0017c2b00) /usr/local/go/src/net/http/transport.go:540 net/http.(*Transport).RoundTrip(0x6fe4b20?, 0xc003406720?) /usr/local/go/src/net/http/roundtrip.go:17 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*bearerAuthRoundTripper).RoundTrip(0xc00350f2c0, 0xc0017c2a00) vendor/k8s.io/client-go/transport/round_trippers.go:317 k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc003649b60, 0xc0017c2900) vendor/k8s.io/client-go/transport/round_trippers.go:168 net/http.send(0xc0017c2900, {0x7fad100, 0xc003649b60}, {0x74d54e0?, 0x1?, 0x0?}) /usr/local/go/src/net/http/client.go:251 net/http.(*Client).send(0xc00350f2f0, 0xc0017c2900, {0x7f7a08e735b8?, 0x100?, 0x0?}) /usr/local/go/src/net/http/client.go:175 net/http.(*Client).do(0xc00350f2f0, 0xc0017c2900) /usr/local/go/src/net/http/client.go:715 net/http.(*Client).Do(...) /usr/local/go/src/net/http/client.go:581 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0017c2700, {0x7fe0bc8, 0xc00012e008}, 0x7f79d9508d30?) vendor/k8s.io/client-go/rest/request.go:964 k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0017c2700, {0x7fe0bc8, 0xc00012e008}) vendor/k8s.io/client-go/rest/request.go:1005 k8s.io/kubernetes/test/e2e/framework/pod.getPodLogsInternal({0x801de88?, 0xc001546680?}, {0xc0032e0a30, 0x10}, {0x75e13f6, 0xf}, {0x75e13f6, 0xf}, 0x0, 0x0, ...) test/e2e/framework/pod/resource.go:572 k8s.io/kubernetes/test/e2e/framework/pod.GetPodLogs(...) 
test/e2e/framework/pod/resource.go:543 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6.1() test/e2e/auth/service_accounts.go:505 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc00012e000?}, 0x262a61f?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:23:46.402: INFO: Error pulling logs: an error on the server ("unknown") has prevented the request from succeeding (get pods inclusterclient) ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 19m40.517s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 19m40.108s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 20m0.519s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 20m0.11s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select, 2 minutes] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 > k8s.io/kubernetes/test/e2e/auth.glob..func5.6() test/e2e/auth/service_accounts.go:503 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:24:16.343: INFO: polling logs ------------------------------ Progress Report for Ginkgo Process #5 Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 20m20.521s) test/e2e/auth/service_accounts.go:432 In [It] (Node Runtime: 20m20.113s) test/e2e/auth/service_accounts.go:432 Spec Goroutine goroutine 3469 [select] k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000d01e00, 0xc0017c2600) vendor/golang.org/x/net/http2/transport.go:1200 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000223880, 0xc0017c2600, {0xe0?}) vendor/golang.org/x/net/http2/transport.go:519 k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...) vendor/golang.org/x/net/http2/transport.go:480 k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc001857b80?}, 0xc0017c2600?) vendor/golang.org/x/net/http2/transport.go:3020 net/http.(*Transport).roundTrip(0xc001857b80, 0xc0017c2600) /usr/local/go/src/net/http/transport.go:540 net/http.(*Transport).RoundTrip(0x6fe4b20?, 0xc0052d22a0?) 
    /usr/local/go/src/net/http/roundtrip.go:17
k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*bearerAuthRoundTripper).RoundTrip(0xc00350f2c0, 0xc0017c2500)
    vendor/k8s.io/client-go/transport/round_trippers.go:317
k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc003649b60, 0xc0017c2400)
    vendor/k8s.io/client-go/transport/round_trippers.go:168
net/http.send(0xc0017c2400, {0x7fad100, 0xc003649b60}, {0x74d54e0?, 0x1?, 0x0?})
    /usr/local/go/src/net/http/client.go:251
net/http.(*Client).send(0xc00350f2f0, 0xc0017c2400, {0x7f7a08e735b8?, 0x100?, 0x0?})
    /usr/local/go/src/net/http/client.go:175
net/http.(*Client).do(0xc00350f2f0, 0xc0017c2400)
    /usr/local/go/src/net/http/client.go:715
net/http.(*Client).Do(...)
    /usr/local/go/src/net/http/client.go:581
k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0017c2200, {0x7fe0bc8, 0xc00012e008}, 0x7f79d95088f0?)
    vendor/k8s.io/client-go/rest/request.go:964
k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0017c2200, {0x7fe0bc8, 0xc00012e008})
    vendor/k8s.io/client-go/rest/request.go:1005
k8s.io/kubernetes/test/e2e/framework/pod.getPodLogsInternal({0x801de88?, 0xc001546680?}, {0xc0032e0a30, 0x10}, {0x75e13f6, 0xf}, {0x75e13f6, 0xf}, 0x0, 0x0, ...)
    test/e2e/framework/pod/resource.go:572
k8s.io/kubernetes/test/e2e/framework/pod.GetPodLogs(...)
    test/e2e/framework/pod/resource.go:543
> k8s.io/kubernetes/test/e2e/auth.glob..func5.6.1()
    test/e2e/auth/service_accounts.go:505
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc00012e000?}, 0x262a61f?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445
> k8s.io/kubernetes/test/e2e/auth.glob..func5.6()
    test/e2e/auth/service_accounts.go:503
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 03:24:46.432: INFO: Error pulling logs: an error on the server ("unknown") has prevented the request from succeeding (get pods inclusterclient)
Nov 26 03:24:46.432: INFO: polling logs
------------------------------
Progress Report for Ginkgo Process #5
  Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 20m40.524s)
    test/e2e/auth/service_accounts.go:432
  In [It] (Node Runtime: 20m40.116s)
    test/e2e/auth/service_accounts.go:432
  Spec Goroutine
  goroutine 3469 [select]
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000d01e00, 0xc0017c2b00)
    vendor/golang.org/x/net/http2/transport.go:1200
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000223880, 0xc0017c2b00, {0xe0?})
    vendor/golang.org/x/net/http2/transport.go:519
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...)
    vendor/golang.org/x/net/http2/transport.go:480
k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc001857b80?}, 0xc0017c2b00?)
    vendor/golang.org/x/net/http2/transport.go:3020
net/http.(*Transport).roundTrip(0xc001857b80, 0xc0017c2b00)
    /usr/local/go/src/net/http/transport.go:540
net/http.(*Transport).RoundTrip(0x6fe4b20?, 0xc0052d2870?)
    /usr/local/go/src/net/http/roundtrip.go:17
k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*bearerAuthRoundTripper).RoundTrip(0xc00350f2c0, 0xc0017c2a00)
    vendor/k8s.io/client-go/transport/round_trippers.go:317
k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc003649b60, 0xc0017c2900)
    vendor/k8s.io/client-go/transport/round_trippers.go:168
net/http.send(0xc0017c2900, {0x7fad100, 0xc003649b60}, {0x74d54e0?, 0x1?, 0x0?})
    /usr/local/go/src/net/http/client.go:251
net/http.(*Client).send(0xc00350f2f0, 0xc0017c2900, {0x7f7a08e735b8?, 0x100?, 0x0?})
    /usr/local/go/src/net/http/client.go:175
net/http.(*Client).do(0xc00350f2f0, 0xc0017c2900)
    /usr/local/go/src/net/http/client.go:715
net/http.(*Client).Do(...)
    /usr/local/go/src/net/http/client.go:581
k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0017c2700, {0x7fe0bc8, 0xc00012e008}, 0x7f79d95088f0?)
    vendor/k8s.io/client-go/rest/request.go:964
k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0017c2700, {0x7fe0bc8, 0xc00012e008})
    vendor/k8s.io/client-go/rest/request.go:1005
k8s.io/kubernetes/test/e2e/framework/pod.getPodLogsInternal({0x801de88?, 0xc001546680?}, {0xc0032e0a30, 0x10}, {0x75e13f6, 0xf}, {0x75e13f6, 0xf}, 0x0, 0x0, ...)
    test/e2e/framework/pod/resource.go:572
k8s.io/kubernetes/test/e2e/framework/pod.GetPodLogs(...)
    test/e2e/framework/pod/resource.go:543
> k8s.io/kubernetes/test/e2e/auth.glob..func5.6.1()
    test/e2e/auth/service_accounts.go:505
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc00012e000?}, 0x262a61f?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445
> k8s.io/kubernetes/test/e2e/auth.glob..func5.6()
    test/e2e/auth/service_accounts.go:503
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
------------------------------
Progress Report for Ginkgo Process #5
  Automatically polling progress: [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] (Spec Runtime: 21m0.528s)
    test/e2e/auth/service_accounts.go:432
  In [It] (Node Runtime: 21m0.12s)
    test/e2e/auth/service_accounts.go:432
  Spec Goroutine
  goroutine 3469 [select]
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*ClientConn).RoundTrip(0xc000d01e00, 0xc0017c2b00)
    vendor/golang.org/x/net/http2/transport.go:1200
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTripOpt(0xc000223880, 0xc0017c2b00, {0xe0?})
    vendor/golang.org/x/net/http2/transport.go:519
k8s.io/kubernetes/vendor/golang.org/x/net/http2.(*Transport).RoundTrip(...)
    vendor/golang.org/x/net/http2/transport.go:480
k8s.io/kubernetes/vendor/golang.org/x/net/http2.noDialH2RoundTripper.RoundTrip({0xc001857b80?}, 0xc0017c2b00?)
    vendor/golang.org/x/net/http2/transport.go:3020
net/http.(*Transport).roundTrip(0xc001857b80, 0xc0017c2b00)
    /usr/local/go/src/net/http/transport.go:540
net/http.(*Transport).RoundTrip(0x6fe4b20?, 0xc0052d2870?)
    /usr/local/go/src/net/http/roundtrip.go:17
k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*bearerAuthRoundTripper).RoundTrip(0xc00350f2c0, 0xc0017c2a00)
    vendor/k8s.io/client-go/transport/round_trippers.go:317
k8s.io/kubernetes/vendor/k8s.io/client-go/transport.(*userAgentRoundTripper).RoundTrip(0xc003649b60, 0xc0017c2900)
    vendor/k8s.io/client-go/transport/round_trippers.go:168
net/http.send(0xc0017c2900, {0x7fad100, 0xc003649b60}, {0x74d54e0?, 0x1?, 0x0?})
    /usr/local/go/src/net/http/client.go:251
net/http.(*Client).send(0xc00350f2f0, 0xc0017c2900, {0x7f7a08e735b8?, 0x100?, 0x0?})
    /usr/local/go/src/net/http/client.go:175
net/http.(*Client).do(0xc00350f2f0, 0xc0017c2900)
    /usr/local/go/src/net/http/client.go:715
net/http.(*Client).Do(...)
    /usr/local/go/src/net/http/client.go:581
k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).request(0xc0017c2700, {0x7fe0bc8, 0xc00012e008}, 0x7f79d95088f0?)
    vendor/k8s.io/client-go/rest/request.go:964
k8s.io/kubernetes/vendor/k8s.io/client-go/rest.(*Request).Do(0xc0017c2700, {0x7fe0bc8, 0xc00012e008})
    vendor/k8s.io/client-go/rest/request.go:1005
k8s.io/kubernetes/test/e2e/framework/pod.getPodLogsInternal({0x801de88?, 0xc001546680?}, {0xc0032e0a30, 0x10}, {0x75e13f6, 0xf}, {0x75e13f6, 0xf}, 0x0, 0x0, ...)
    test/e2e/framework/pod/resource.go:572
k8s.io/kubernetes/test/e2e/framework/pod.GetPodLogs(...)
    test/e2e/framework/pod/resource.go:543
> k8s.io/kubernetes/test/e2e/auth.glob..func5.6.1()
    test/e2e/auth/service_accounts.go:505
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc00012e000?}, 0x262a61f?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc00012e000}, 0xc0036e5740, 0x2fdb16a?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc00012e000}, 0xb8?, 0x2fd9d05?, 0x18?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc00012e000}, 0x75b521a?, 0xc0040cde08?, 0x262a967?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x75b6f82?, 0x4?, 0x75d300d?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445
> k8s.io/kubernetes/test/e2e/auth.glob..func5.6()
    test/e2e/auth/service_accounts.go:503
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0xc00223c990, 0xc004188f00})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
Nov 26 03:25:12.151: INFO: Retrying. Still waiting to see more unique tokens: got=1, want=2
Nov 26 03:25:12.151: FAIL: Unexpected error: timed out waiting for the condition
I1126 03:04:11.708311 1 main.go:61] started
I1126 03:04:41.713111 1 main.go:79] calling /healthz
I1126 03:04:41.713593 1 main.go:96] authz_header=uMMuStWLQW0rgsubfqK3RiLre-hTpaHNPFWfY3KVoAk
I1126 03:05:11.710134 1 main.go:79] calling /healthz
I1126 03:05:11.710370 1 main.go:96] authz_header=uMMuStWLQW0rgsubfqK3RiLre-hTpaHNPFWfY3KVoAk
Full Stack Trace
k8s.io/kubernetes/test/e2e/auth.glob..func5.6()
    test/e2e/auth/service_accounts.go:520 +0x9ab
[AfterEach] [sig-auth] ServiceAccounts
  test/e2e/framework/node/init/init.go:32
Nov 26 03:25:12.151: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
[DeferCleanup (Each)] [sig-auth] ServiceAccounts
  test/e2e/framework/metrics/init/init.go:33
[DeferCleanup (Each)] [sig-auth] ServiceAccounts
  dump namespaces | framework.go:196
STEP: dump namespace information after failure 11/26/22 03:25:12.248
STEP: Collecting events from namespace "svcaccounts-5795". 11/26/22 03:25:12.248
STEP: Found 5 events. 11/26/22 03:25:12.337
Nov 26 03:25:12.337: INFO: At 2022-11-26 03:04:10 +0000 UTC - event for inclusterclient: {default-scheduler } Scheduled: Successfully assigned svcaccounts-5795/inclusterclient to bootstrap-e2e-minion-group-8wdk
Nov 26 03:25:12.337: INFO: At 2022-11-26 03:04:11 +0000 UTC - event for inclusterclient: {kubelet bootstrap-e2e-minion-group-8wdk} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Nov 26 03:25:12.337: INFO: At 2022-11-26 03:04:11 +0000 UTC - event for inclusterclient: {kubelet bootstrap-e2e-minion-group-8wdk} Created: Created container inclusterclient
Nov 26 03:25:12.337: INFO: At 2022-11-26 03:04:11 +0000 UTC - event for inclusterclient: {kubelet bootstrap-e2e-minion-group-8wdk} Started: Started container inclusterclient
Nov 26 03:25:12.337: INFO: At 2022-11-26 03:05:40 +0000 UTC - event for inclusterclient: {kubelet bootstrap-e2e-minion-group-8wdk} Killing: Stopping container inclusterclient
Nov 26 03:25:12.559: INFO: POD NODE PHASE GRACE CONDITIONS
Nov 26 03:25:12.559: INFO: inclusterclient bootstrap-e2e-minion-group-8wdk Failed [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:04:10 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:41 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:41 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:04:10 +0000 UTC }]
Nov 26 03:25:12.559: INFO:
Nov 26 03:25:12.948: INFO: Logging node info for node bootstrap-e2e-master
Nov 26 03:25:13.015: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master 6434c658-db87-4566-9960-c594435d7ea0 15625 0 2022-11-26 02:57:42 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:23:35 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:23:35 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:23:35 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:23:35 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID 
available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:23:35 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.169.190,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5736e6f149167618f71cd530dafef4cc,SystemUUID:5736e6f1-4916-7618-f71c-d530dafef4cc,BootID:aec7342f-3939-425a-bcb1-13b86fb32845,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:25:13.015: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 03:25:13.104: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 03:25:13.355: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:13.355: INFO: Container etcd-container ready: true, restart count 2 Nov 26 03:25:13.355: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-26 02:57:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:13.355: INFO: Container kube-addon-manager ready: true, restart count 1 Nov 26 03:25:13.355: INFO: metadata-proxy-v0.1-zcw5j started at 
2022-11-26 02:57:48 +0000 UTC (0+2 container statuses recorded) Nov 26 03:25:13.355: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:25:13.355: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:25:13.355: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:13.355: INFO: Container kube-apiserver ready: true, restart count 0 Nov 26 03:25:13.355: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:13.355: INFO: Container kube-controller-manager ready: true, restart count 5 Nov 26 03:25:13.355: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:13.355: INFO: Container etcd-container ready: true, restart count 2 Nov 26 03:25:13.355: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:13.355: INFO: Container kube-scheduler ready: true, restart count 3 Nov 26 03:25:13.355: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-26 02:57:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:13.355: INFO: Container l7-lb-controller ready: true, restart count 7 Nov 26 03:25:13.355: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:13.355: INFO: Container konnectivity-server-container ready: true, restart count 6 Nov 26 03:25:13.985: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 03:25:13.985: INFO: Logging node info for node bootstrap-e2e-minion-group-7rps Nov 26 03:25:14.106: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-7rps d3085899-95ef-4dfa-ae30-feaa3b8cf547 15454 0 2022-11-26 02:57:40 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-7rps kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-7rps topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-859":"bootstrap-e2e-minion-group-7rps"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kube-controller-manager 
Update v1 2022-11-26 03:22:10 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:22:10 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status} {node-problem-detector Update v1 2022-11-26 03:22:47 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-7rps,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 
is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:10 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:10 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:10 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:22:10 +0000 UTC,LastTransitionTime:2022-11-26 02:57:42 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.145.67.56,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:25dabe18cf1d9ba1fb46b48496913a34,SystemUUID:25dabe18-cf1d-9ba1-fb46-b48496913a34,BootID:cc4a1118-0d4a-44da-b481-07a484a9d681,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 
registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:25:14.106: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-7rps Nov 26 03:25:14.235: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-7rps Nov 26 03:25:14.450: INFO: pod-d160ca42-832c-4ddf-b18b-b64476b424a2 started at 2022-11-26 03:08:50 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:14.450: INFO: Container write-pod ready: false, restart count 0 Nov 26 03:25:14.450: INFO: ss-0 started at 2022-11-26 03:19:46 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:14.450: INFO: Container webserver ready: false, restart count 5 Nov 26 03:25:14.450: INFO: metrics-server-v0.5.2-867b8754b9-zrdj7 started at 2022-11-26 02:58:13 +0000 UTC (0+2 container statuses recorded) Nov 26 03:25:14.450: INFO: Container metrics-server ready: false, restart count 8 Nov 26 03:25:14.450: INFO: Container metrics-server-nanny ready: false, restart count 9 Nov 26 03:25:14.450: INFO: lb-sourcerange-6wkl4 started at 2022-11-26 03:10:28 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:14.450: INFO: Container netexec ready: true, restart count 6 Nov 26 03:25:14.450: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:20:58 +0000 UTC (0+7 container statuses recorded) Nov 26 03:25:14.450: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 03:25:14.450: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 03:25:14.450: INFO: Container csi-resizer ready: true, restart count 3 Nov 26 03:25:14.450: INFO: Container csi-snapshotter ready: true, restart count 3 Nov 26 03:25:14.450: INFO: Container hostpath ready: true, restart count 3 Nov 26 03:25:14.450: INFO: 
Container liveness-probe ready: true, restart count 3 Nov 26 03:25:14.450: INFO: Container node-driver-registrar ready: true, restart count 3 Nov 26 03:25:14.450: INFO: pod-secrets-776ddd5d-115d-4fda-9e3c-036bbaedf0be started at 2022-11-26 03:19:21 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:14.450: INFO: Container creates-volume-test ready: false, restart count 0 Nov 26 03:25:14.450: INFO: pod-configmaps-0b0bef5d-d35f-45da-b262-991f846c4033 started at 2022-11-26 03:20:12 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:14.450: INFO: Container agnhost-container ready: false, restart count 0 Nov 26 03:25:14.450: INFO: metadata-proxy-v0.1-swmbn started at 2022-11-26 02:57:41 +0000 UTC (0+2 container statuses recorded) Nov 26 03:25:14.450: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:25:14.450: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:25:14.450: INFO: kube-proxy-bootstrap-e2e-minion-group-7rps started at 2022-11-26 02:57:40 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:14.450: INFO: Container kube-proxy ready: true, restart count 8 Nov 26 03:25:14.450: INFO: netserver-0 started at 2022-11-26 03:21:51 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:14.450: INFO: Container webserver ready: true, restart count 2 Nov 26 03:25:14.450: INFO: csi-mockplugin-0 started at 2022-11-26 03:21:45 +0000 UTC (0+4 container statuses recorded) Nov 26 03:25:14.450: INFO: Container busybox ready: true, restart count 3 Nov 26 03:25:14.450: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 03:25:14.450: INFO: Container driver-registrar ready: false, restart count 4 Nov 26 03:25:14.450: INFO: Container mock ready: false, restart count 4 Nov 26 03:25:14.450: INFO: konnectivity-agent-q9wlj started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:14.450: INFO: Container konnectivity-agent ready: true, restart count 8 Nov 26 03:25:14.930: INFO: Latency metrics for node bootstrap-e2e-minion-group-7rps Nov 26 03:25:14.930: INFO: Logging node info for node bootstrap-e2e-minion-group-8wdk Nov 26 03:25:15.275: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-8wdk 99654f41-ed96-4b06-a6c8-1db40d65e751 16025 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-8wdk kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-8wdk topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-2914":"bootstrap-e2e-minion-group-8wdk"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 03:19:24 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {node-problem-detector Update v1 2022-11-26 03:22:51 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 03:25:09 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-8wdk,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:24:37 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:24:37 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:24:37 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:24:37 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.227.133,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7f88ac496457f212a2a8dc4997301551,SystemUUID:7f88ac49-6457-f212-a2a8-dc4997301551,BootID:691b96ec-62db-4c87-94b3-0915996e979c,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 
registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-2914^1f225fd4-6d39-11ed-b0ed-7e8773e2527a],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-2914^1f225fd4-6d39-11ed-b0ed-7e8773e2527a,DevicePath:,},},Config:nil,},} Nov 26 03:25:15.276: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-8wdk Nov 26 03:25:15.326: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-8wdk Nov 26 03:25:15.576: INFO: execpod-dropjvg4x started at 2022-11-26 03:10:25 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container 
agnhost-container ready: true, restart count 6 Nov 26 03:25:15.576: INFO: metadata-proxy-v0.1-fzfwr started at 2022-11-26 02:57:45 +0000 UTC (0+2 container statuses recorded) Nov 26 03:25:15.576: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:25:15.576: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:25:15.576: INFO: coredns-6d97d5ddb-6l6bj started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container coredns ready: true, restart count 8 Nov 26 03:25:15.576: INFO: konnectivity-agent-v4t28 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container konnectivity-agent ready: false, restart count 8 Nov 26 03:25:15.576: INFO: csi-mockplugin-0 started at 2022-11-26 03:13:53 +0000 UTC (0+4 container statuses recorded) Nov 26 03:25:15.576: INFO: Container busybox ready: true, restart count 5 Nov 26 03:25:15.576: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 03:25:15.576: INFO: Container driver-registrar ready: true, restart count 5 Nov 26 03:25:15.576: INFO: Container mock ready: true, restart count 5 Nov 26 03:25:15.576: INFO: volume-prep-provisioning-2914 started at 2022-11-26 03:19:23 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container init-volume-provisioning-2914 ready: false, restart count 0 Nov 26 03:25:15.576: INFO: pod-cb1f5910-f737-44a4-bc9d-1e9f85571f6f started at 2022-11-26 03:21:45 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container write-pod ready: false, restart count 0 Nov 26 03:25:15.576: INFO: affinity-lb-esipp-r5p2x started at 2022-11-26 03:25:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container affinity-lb-esipp ready: false, restart count 0 Nov 26 03:25:15.576: INFO: kube-dns-autoscaler-5f6455f985-54xp5 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container autoscaler ready: true, restart count 8 Nov 26 03:25:15.576: INFO: l7-default-backend-8549d69d99-kgcjh started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 03:25:15.576: INFO: volume-snapshot-controller-0 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container volume-snapshot-controller ready: false, restart count 7 Nov 26 03:25:15.576: INFO: netserver-1 started at 2022-11-26 03:21:51 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container webserver ready: true, restart count 1 Nov 26 03:25:15.576: INFO: pvc-volume-tester-j7n8g started at 2022-11-26 03:16:40 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container volume-tester ready: false, restart count 0 Nov 26 03:25:15.576: INFO: kube-proxy-bootstrap-e2e-minion-group-8wdk started at 2022-11-26 02:57:44 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container kube-proxy ready: false, restart count 8 Nov 26 03:25:15.576: INFO: execpod-acceptrr72x started at 2022-11-26 03:10:23 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container agnhost-container ready: false, restart count 4 Nov 26 03:25:15.576: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:19:18 +0000 UTC (0+7 container statuses recorded) Nov 26 03:25:15.576: INFO: Container csi-attacher ready: true, restart count 6 Nov 26 
03:25:15.576: INFO: Container csi-provisioner ready: true, restart count 6 Nov 26 03:25:15.576: INFO: Container csi-resizer ready: true, restart count 6 Nov 26 03:25:15.576: INFO: Container csi-snapshotter ready: true, restart count 6 Nov 26 03:25:15.576: INFO: Container hostpath ready: true, restart count 6 Nov 26 03:25:15.576: INFO: Container liveness-probe ready: true, restart count 6 Nov 26 03:25:15.576: INFO: Container node-driver-registrar ready: true, restart count 6 Nov 26 03:25:15.576: INFO: inclusterclient started at 2022-11-26 03:04:10 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:15.576: INFO: Container inclusterclient ready: false, restart count 0 Nov 26 03:25:15.992: INFO: Latency metrics for node bootstrap-e2e-minion-group-8wdk Nov 26 03:25:15.992: INFO: Logging node info for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:25:16.123: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-h8k8 d6f1c0c9-6047-409c-8381-ed8be6457456 16045 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-h8k8 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-h8k8 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-742":"bootstrap-e2e-minion-group-h8k8"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:45 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 03:17:33 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 03:22:51 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 03:25:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-h8k8,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning 
properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:48 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:48 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:48 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:22:48 +0000 UTC,LastTransitionTime:2022-11-26 02:57:45 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.177.45,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5d45caccbe73d6049c0c7b580c44ebb3,SystemUUID:5d45cacc-be73-d604-9c0c-7b580c44ebb3,BootID:f77edcc1-2ac4-4561-a5dd-42b928bec3f2,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 
gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:25:16.124: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:25:16.220: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-h8k8 Nov 26 03:25:16.337: INFO: csi-mockplugin-0 started at 2022-11-26 03:15:13 +0000 UTC (0+3 container statuses recorded) Nov 26 03:25:16.338: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 03:25:16.338: INFO: Container 
driver-registrar ready: false, restart count 6 Nov 26 03:25:16.338: INFO: Container mock ready: false, restart count 6 Nov 26 03:25:16.338: INFO: konnectivity-agent-t5nvv started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container konnectivity-agent ready: false, restart count 7 Nov 26 03:25:16.338: INFO: back-off-cap started at 2022-11-26 02:59:37 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container back-off-cap ready: false, restart count 9 Nov 26 03:25:16.338: INFO: host-test-container-pod started at 2022-11-26 03:22:21 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 03:25:16.338: INFO: affinity-lb-esipp-h4vsd started at 2022-11-26 03:25:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container affinity-lb-esipp ready: false, restart count 0 Nov 26 03:25:16.338: INFO: external-local-pods-td9g8 started at 2022-11-26 03:21:45 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container netexec ready: false, restart count 2 Nov 26 03:25:16.338: INFO: kube-proxy-bootstrap-e2e-minion-group-h8k8 started at 2022-11-26 02:57:44 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container kube-proxy ready: false, restart count 8 Nov 26 03:25:16.338: INFO: metadata-proxy-v0.1-9vqbj started at 2022-11-26 02:57:45 +0000 UTC (0+2 container statuses recorded) Nov 26 03:25:16.338: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:25:16.338: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:25:16.338: INFO: coredns-6d97d5ddb-x27zq started at 2022-11-26 02:58:05 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container coredns ready: false, restart count 8 Nov 26 03:25:16.338: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 03:15:13 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container csi-attacher ready: false, restart count 2 Nov 26 03:25:16.338: INFO: pause-pod-deployment-648855d779-zz4dx started at 2022-11-26 03:21:48 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container agnhost-pause ready: true, restart count 1 Nov 26 03:25:16.338: INFO: netserver-2 started at 2022-11-26 03:21:51 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container webserver ready: false, restart count 3 Nov 26 03:25:16.338: INFO: test-container-pod started at 2022-11-26 03:22:21 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container webserver ready: true, restart count 0 Nov 26 03:25:16.338: INFO: csi-mockplugin-0 started at 2022-11-26 03:16:56 +0000 UTC (0+3 container statuses recorded) Nov 26 03:25:16.338: INFO: Container csi-provisioner ready: true, restart count 3 Nov 26 03:25:16.338: INFO: Container driver-registrar ready: true, restart count 3 Nov 26 03:25:16.338: INFO: Container mock ready: true, restart count 3 Nov 26 03:25:16.338: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 03:16:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:25:16.338: INFO: Container csi-attacher ready: true, restart count 3 Nov 26 03:25:16.935: INFO: Latency metrics for node bootstrap-e2e-minion-group-h8k8 [DeferCleanup (Each)] [sig-auth] ServiceAccounts tear down framework | framework.go:193 STEP: Destroying namespace "svcaccounts-5795" for this suite. 11/26/22 03:25:16.935
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\sclient\sSimple\spod\sshould\sreturn\scommand\sexit\scodes\s\[Slow\]\srunning\sa\sfailing\scommand\swith\s\-\-leave\-stdin\-open$'
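The --ginkgo.focus value above is just the full Ginkgo test name with whitespace and hyphens backslash-escaped. A small illustrative check (not part of the test run; the plain test name string below is reconstructed from the regexp) that the pattern selects the intended test:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Focus regexp copied verbatim from the repro command above.
	focus := `Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\sclient\sSimple\spod\sshould\sreturn\scommand\sexit\scodes\s\[Slow\]\srunning\sa\sfailing\scommand\swith\s\-\-leave\-stdin\-open$`
	// Plain test name the regexp is meant to match.
	name := "Kubernetes e2e suite [It] [sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command with --leave-stdin-open"
	fmt.Println(regexp.MustCompile(focus).MatchString(name)) // prints: true
}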
test/e2e/kubectl/kubectl.go:589 k8s.io/kubernetes/test/e2e/kubectl.glob..func1.8.7.7() test/e2e/kubectl/kubectl.go:589 +0x22d
from junit_01.xml
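The failing assertion at test/e2e/kubectl/kubectl.go:589 wraps the `kubectl run ... failure-4 --leave-stdin-open -- /bin/sh -c exit 42` invocation recorded in the log below and treats any kubectl error as a test failure. A minimal standalone sketch of that step, assuming kubectl is on PATH and the current kubeconfig points at a reachable cluster (this is not the framework's code; it only mirrors the command line from this run, with the namespace, image, and pod name copied verbatim):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("kubectl",
		"--namespace=kubectl-5762",
		"run", "-i",
		"--image=registry.k8s.io/e2e-test-images/busybox:1.29-4",
		"--restart=Never",
		"--pod-running-timeout=2m0s",
		"failure-4",
		"--leave-stdin-open",
		"--", "/bin/sh", "-c", "exit 42")
	out, err := cmd.CombinedOutput()
	fmt.Printf("%s\n", out)
	// With --leave-stdin-open the e2e test expects kubectl itself to exit 0 even though
	// the container exits 42; in this run kubectl instead returned an error because the
	// apiserver could not fetch container logs from the kubelet ("No agent available").
	if err != nil {
		fmt.Println("kubectl failed:", err)
	}
}

The "No agent available" error suggests the apiserver-to-kubelet path through the konnectivity proxy was unavailable at that moment (the dump above also shows a konnectivity-agent container with repeated restarts), so this failure looks environmental rather than an exit-code handling regression.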
[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 03:19:16.257 Nov 26 03:19:16.257: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename kubectl 11/26/22 03:19:16.259 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 03:19:16.469 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 03:19:16.567 [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-cli] Kubectl client test/e2e/kubectl/kubectl.go:274 [BeforeEach] Simple pod test/e2e/kubectl/kubectl.go:411 STEP: creating the pod from 11/26/22 03:19:16.664 Nov 26 03:19:16.664: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-5762 create -f -' Nov 26 03:19:17.197: INFO: stderr: "" Nov 26 03:19:17.197: INFO: stdout: "pod/httpd created\n" Nov 26 03:19:17.197: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [httpd] Nov 26 03:19:17.197: INFO: Waiting up to 5m0s for pod "httpd" in namespace "kubectl-5762" to be "running and ready" Nov 26 03:19:17.319: INFO: Pod "httpd": Phase="Pending", Reason="", readiness=false. Elapsed: 121.848317ms Nov 26 03:19:17.319: INFO: Error evaluating pod condition running and ready: want pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' to be 'Running' but was 'Pending' Nov 26 03:19:19.372: INFO: Pod "httpd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.174814497s Nov 26 03:19:19.372: INFO: Error evaluating pod condition running and ready: want pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' to be 'Running' but was 'Pending' Nov 26 03:19:21.435: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 4.237630036s Nov 26 03:19:21.435: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:23.423: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 6.225772115s Nov 26 03:19:23.423: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:25.424: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 8.226854948s Nov 26 03:19:25.424: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:27.402: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 10.204934192s Nov 26 03:19:27.402: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:29.463: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 12.265694947s Nov 26 03:19:29.463: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:31.384: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 14.187084162s Nov 26 03:19:31.385: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:33.466: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 16.268948614s Nov 26 03:19:33.466: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:35.417: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 18.219537873s Nov 26 03:19:35.417: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:37.388: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 20.190376346s Nov 26 03:19:37.388: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:39.391: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 22.193731953s Nov 26 03:19:39.391: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:41.395: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 24.19717475s Nov 26 03:19:41.395: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:43.435: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 26.23737428s Nov 26 03:19:43.435: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:45.383: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 28.185590228s Nov 26 03:19:45.383: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:47.378: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 30.180932296s Nov 26 03:19:47.378: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:49.397: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 32.199339058s Nov 26 03:19:49.397: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:51.403: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 34.205498864s Nov 26 03:19:51.403: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:53.418: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 36.221098978s Nov 26 03:19:53.419: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:55.388: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 38.190239975s Nov 26 03:19:55.388: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:57.389: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 40.191670083s Nov 26 03:19:57.389: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:29 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:17 +0000 UTC }] Nov 26 03:19:59.467: INFO: Pod "httpd": Phase="Running", Reason="", readiness=true. Elapsed: 42.269587106s Nov 26 03:19:59.467: INFO: Pod "httpd" satisfied condition "running and ready" Nov 26 03:19:59.467: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [httpd] [It] [Slow] running a failing command with --leave-stdin-open test/e2e/kubectl/kubectl.go:585 Nov 26 03:19:59.467: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-5762 run -i --image=registry.k8s.io/e2e-test-images/busybox:1.29-4 --restart=Never --pod-running-timeout=2m0s failure-4 --leave-stdin-open -- /bin/sh -c exit 42' Nov 26 03:20:03.541: INFO: rc: 1 Nov 26 03:20:03.541: INFO: Unexpected error: <exec.CodeExitError>: { Err: <*errors.errorString | 0xc000f017a0>{ s: "error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-5762 run -i --image=registry.k8s.io/e2e-test-images/busybox:1.29-4 --restart=Never --pod-running-timeout=2m0s failure-4 --leave-stdin-open -- /bin/sh -c exit 42:\nCommand stdout:\n\nstderr:\nError from server: Get \"https://10.138.0.4:10250/containerLogs/kubectl-5762/failure-4/failure-4\": No agent available\n\nerror:\nexit status 1", }, Code: 1, } Nov 26 03:20:03.541: FAIL: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-5762 run -i --image=registry.k8s.io/e2e-test-images/busybox:1.29-4 --restart=Never --pod-running-timeout=2m0s failure-4 --leave-stdin-open -- /bin/sh -c exit 42: Command stdout: stderr: Error from server: Get "https://10.138.0.4:10250/containerLogs/kubectl-5762/failure-4/failure-4": No agent available error: exit status 1 Full Stack Trace k8s.io/kubernetes/test/e2e/kubectl.glob..func1.8.7.7() test/e2e/kubectl/kubectl.go:589 +0x22d [AfterEach] Simple pod test/e2e/kubectl/kubectl.go:417 STEP: using delete to clean up resources 11/26/22 03:20:03.541 Nov 26 03:20:03.542: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-5762 delete --grace-period=0 --force -f -' Nov 26 03:20:03.962: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" Nov 26 03:20:03.962: INFO: stdout: "pod \"httpd\" force deleted\n" Nov 26 03:20:03.962: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-5762 get rc,svc -l name=httpd --no-headers' Nov 26 03:20:04.305: INFO: stderr: "No resources found in kubectl-5762 namespace.\n" Nov 26 03:20:04.305: INFO: stdout: "" Nov 26 03:20:04.305: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-5762 get pods -l name=httpd -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' Nov 26 03:20:04.721: INFO: stderr: "" Nov 26 03:20:04.721: INFO: stdout: "" [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 Nov 26 03:20:04.721: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 03:20:04.908 STEP: Collecting events from namespace "kubectl-5762". 11/26/22 03:20:04.908 STEP: Found 11 events. 11/26/22 03:20:05.099 Nov 26 03:20:05.099: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for failure-4: { } Scheduled: Successfully assigned kubectl-5762/failure-4 to bootstrap-e2e-minion-group-7rps Nov 26 03:20:05.099: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for httpd: { } Scheduled: Successfully assigned kubectl-5762/httpd to bootstrap-e2e-minion-group-7rps Nov 26 03:20:05.099: INFO: At 2022-11-26 03:19:19 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-7rps} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Nov 26 03:20:05.099: INFO: At 2022-11-26 03:19:19 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-7rps} Created: Created container httpd Nov 26 03:20:05.099: INFO: At 2022-11-26 03:19:19 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-7rps} Started: Started container httpd Nov 26 03:20:05.099: INFO: At 2022-11-26 03:19:28 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-7rps} Killing: Stopping container httpd Nov 26 03:20:05.099: INFO: At 2022-11-26 03:19:29 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-7rps} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Nov 26 03:20:05.099: INFO: At 2022-11-26 03:19:31 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-7rps} BackOff: Back-off restarting failed container httpd in pod httpd_kubectl-5762(4e189948-5eb8-4907-a5ae-c495add6c629) Nov 26 03:20:05.099: INFO: At 2022-11-26 03:20:00 +0000 UTC - event for failure-4: {kubelet bootstrap-e2e-minion-group-7rps} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine Nov 26 03:20:05.099: INFO: At 2022-11-26 03:20:00 +0000 UTC - event for failure-4: {kubelet bootstrap-e2e-minion-group-7rps} Created: Created container failure-4 Nov 26 03:20:05.099: INFO: At 2022-11-26 03:20:00 +0000 UTC - event for failure-4: {kubelet bootstrap-e2e-minion-group-7rps} Started: Started container failure-4 Nov 26 03:20:05.237: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 03:20:05.237: INFO: failure-4 bootstrap-e2e-minion-group-7rps Failed [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:59 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:59 +0000 UTC PodFailed } {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:59 +0000 UTC PodFailed } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:19:59 +0000 UTC }] Nov 26 03:20:05.237: INFO: Nov 26 03:20:05.635: INFO: Unable to fetch kubectl-5762/failure-4/failure-4 logs: an error on the server ("unknown") has prevented the request from succeeding (get pods failure-4) Nov 26 03:20:05.734: INFO: Logging node info for node bootstrap-e2e-master Nov 26 03:20:05.814: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master 6434c658-db87-4566-9960-c594435d7ea0 13220 0 2022-11-26 02:57:42 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:18:27 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:18:27 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:18:27 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:18:27 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:18:27 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.169.190,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5736e6f149167618f71cd530dafef4cc,SystemUUID:5736e6f1-4916-7618-f71c-d530dafef4cc,BootID:aec7342f-3939-425a-bcb1-13b86fb32845,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:20:05.815: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 03:20:05.873: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 03:20:05.963: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-master: error trying to reach service: No agent available Nov 26 03:20:05.963: INFO: Logging node info for node bootstrap-e2e-minion-group-7rps Nov 26 03:20:06.020: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-7rps d3085899-95ef-4dfa-ae30-feaa3b8cf547 13840 0 2022-11-26 02:57:40 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 
kubernetes.io/hostname:bootstrap-e2e-minion-group-7rps kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-7rps topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-498":"bootstrap-e2e-minion-group-7rps"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 03:13:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 03:17:47 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 03:19:37 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-7rps,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:17:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:17:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:17:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:17:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:17:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:17:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:17:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:19:37 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:19:37 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:19:37 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:19:37 +0000 UTC,LastTransitionTime:2022-11-26 02:57:42 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.145.67.56,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:25dabe18cf1d9ba1fb46b48496913a34,SystemUUID:25dabe18-cf1d-9ba1-fb46-b48496913a34,BootID:cc4a1118-0d4a-44da-b481-07a484a9d681,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 
registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:20:06.020: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-7rps Nov 26 03:20:06.109: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-7rps Nov 26 03:20:06.173: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-7rps: error trying to reach service: No agent available Nov 26 03:20:06.173: INFO: Logging node info for node bootstrap-e2e-minion-group-8wdk Nov 26 03:20:06.225: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-8wdk 99654f41-ed96-4b06-a6c8-1db40d65e751 14000 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-8wdk kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-8wdk topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-9994":"bootstrap-e2e-minion-group-8wdk"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {node-problem-detector Update v1 2022-11-26 03:17:51 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-26 03:19:24 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:volumesAttached":{}}} status} {kubelet Update v1 2022-11-26 03:19:54 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{},"f:volumesInUse":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-8wdk,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:17:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:17:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:17:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:17:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:17:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:17:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:17:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:19:31 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:19:31 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:19:31 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:19:31 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.227.133,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7f88ac496457f212a2a8dc4997301551,SystemUUID:7f88ac49-6457-f212-a2a8-dc4997301551,BootID:691b96ec-62db-4c87-94b3-0915996e979c,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 
registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[kubernetes.io/csi/csi-hostpath-provisioning-2914^1f225fd4-6d39-11ed-b0ed-7e8773e2527a],VolumesAttached:[]AttachedVolume{AttachedVolume{Name:kubernetes.io/csi/csi-hostpath-provisioning-2914^1f225fd4-6d39-11ed-b0ed-7e8773e2527a,DevicePath:,},},Config:nil,},} Nov 26 03:20:06.226: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-8wdk Nov 26 03:20:06.301: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-8wdk Nov 26 03:20:06.379: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-8wdk: error trying to reach service: No agent available Nov 26 
03:20:06.379: INFO: Logging node info for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:20:06.441: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-h8k8 d6f1c0c9-6047-409c-8381-ed8be6457456 13725 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-h8k8 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-h8k8 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1938":"bootstrap-e2e-minion-group-h8k8","csi-mock-csi-mock-volumes-6444":"bootstrap-e2e-minion-group-h8k8","csi-mock-csi-mock-volumes-742":"bootstrap-e2e-minion-group-h8k8"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:45 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 03:17:33 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 03:17:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 03:19:30 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-h8k8,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:17:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:17:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:17:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:17:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:17:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:17:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:17:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:17:40 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:17:40 +0000 
UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:17:40 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:17:40 +0000 UTC,LastTransitionTime:2022-11-26 02:57:45 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.177.45,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5d45caccbe73d6049c0c7b580c44ebb3,SystemUUID:5d45cacc-be73-d604-9c0c-7b580c44ebb3,BootID:f77edcc1-2ac4-4561-a5dd-42b928bec3f2,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:20:06.441: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:20:06.518: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-h8k8 Nov 26 03:20:06.610: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-h8k8: error trying to reach service: No agent available [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 STEP: Destroying namespace "kubectl-5762" for this suite. 11/26/22 03:20:06.611
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-cli\]\sKubectl\sclient\sSimple\spod\sshould\sreturn\scommand\sexit\scodes\s\[Slow\]\srunning\sa\sfailing\scommand\swithout\s\-\-restart\=Never$'
test/e2e/kubectl/kubectl.go:567 k8s.io/kubernetes/test/e2e/kubectl.glob..func1.8.7.5() test/e2e/kubectl/kubectl.go:567 +0x31e
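The frame above points at the exit-code assertion in the kubectl "Simple pod" suite. A minimal, self-contained sketch of that style of check, using only the Go standard library (the kubectl flags are taken from the invocation logged below; the namespace, stdin data, and error handling are illustrative and this is not the upstream test source):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// Flags copied from the kubectl invocation logged below; the namespace and
	// stdin content are placeholders, not values from the upstream test.
	cmd := exec.Command("kubectl",
		"--namespace=kubectl-2995",
		"run", "-i",
		"--image=registry.k8s.io/e2e-test-images/busybox:1.29-4",
		"--restart=OnFailure",
		"--pod-running-timeout=2m0s",
		"failure-2",
		"--", "/bin/sh", "-c", "cat && exit 42")
	cmd.Stdin = strings.NewReader("abcd1234\n") // -i needs some stdin to forward

	out, err := cmd.CombinedOutput()
	if err == nil {
		fmt.Printf("unexpected success:\n%s", out)
		os.Exit(1)
	}
	// This is the shape of the assertion that produced
	// "Missing expected 'timed out' error": because the container keeps
	// exiting 42 and restarting under --restart=OnFailure, kubectl is
	// expected to give up with a pod-running timeout; any other error
	// (here, a plain exit status 1) fails the test.
	if !strings.Contains(string(out), "timed out") && !strings.Contains(err.Error(), "timed out") {
		fmt.Printf("Missing expected 'timed out' error, got: %v\n%s", err, out)
		os.Exit(1)
	}
	fmt.Println("got the expected timeout error")
}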
[BeforeEach] [sig-cli] Kubectl client set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 03:25:41.578 Nov 26 03:25:41.578: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename kubectl 11/26/22 03:25:41.579 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 03:25:41.744 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 03:25:41.835 [BeforeEach] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-cli] Kubectl client test/e2e/kubectl/kubectl.go:274 [BeforeEach] Simple pod test/e2e/kubectl/kubectl.go:411 STEP: creating the pod from 11/26/22 03:25:41.925 Nov 26 03:25:41.925: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-2995 create -f -' Nov 26 03:25:42.470: INFO: stderr: "" Nov 26 03:25:42.470: INFO: stdout: "pod/httpd created\n" Nov 26 03:25:42.470: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [httpd] Nov 26 03:25:42.470: INFO: Waiting up to 5m0s for pod "httpd" in namespace "kubectl-2995" to be "running and ready" Nov 26 03:25:42.560: INFO: Pod "httpd": Phase="Pending", Reason="", readiness=false. Elapsed: 90.278718ms Nov 26 03:25:42.560: INFO: Error evaluating pod condition running and ready: want pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' to be 'Running' but was 'Pending' Nov 26 03:25:44.672: INFO: Pod "httpd": Phase="Pending", Reason="", readiness=false. Elapsed: 2.201960119s Nov 26 03:25:44.672: INFO: Error evaluating pod condition running and ready: want pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' to be 'Running' but was 'Pending' Nov 26 03:25:46.632: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 4.161942417s Nov 26 03:25:46.632: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:25:48.632: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 6.162047074s Nov 26 03:25:48.632: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:25:50.698: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 8.227398881s Nov 26 03:25:50.698: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:25:52.614: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 10.143644936s Nov 26 03:25:52.614: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:25:54.641: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 12.1709153s Nov 26 03:25:54.641: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:25:56.631: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 14.161275968s Nov 26 03:25:56.632: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:25:58.615: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 16.145213828s Nov 26 03:25:58.615: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:00.636: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 18.16557748s Nov 26 03:26:00.636: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:02.655: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 20.185299484s Nov 26 03:26:02.656: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:04.675: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 22.205168151s Nov 26 03:26:04.675: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:06.602: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 24.132215386s Nov 26 03:26:06.602: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:08.603: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 26.133099933s Nov 26 03:26:08.603: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:10.604: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 28.133355939s Nov 26 03:26:10.604: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:12.602: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 30.131423089s Nov 26 03:26:12.602: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:14.603: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 32.132402089s Nov 26 03:26:14.603: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:16.617: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 34.147182102s Nov 26 03:26:16.617: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:18.602: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 36.131985344s Nov 26 03:26:18.602: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:20.604: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. Elapsed: 38.133610345s Nov 26 03:26:20.604: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:22.604: INFO: Pod "httpd": Phase="Running", Reason="", readiness=false. 
Elapsed: 40.1335519s Nov 26 03:26:22.604: INFO: Error evaluating pod condition running and ready: pod 'httpd' on 'bootstrap-e2e-minion-group-h8k8' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:54 +0000 UTC ContainersNotReady containers with unready status: [httpd]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:25:42 +0000 UTC }] Nov 26 03:26:24.604: INFO: Pod "httpd": Phase="Running", Reason="", readiness=true. Elapsed: 42.133639471s Nov 26 03:26:24.604: INFO: Pod "httpd" satisfied condition "running and ready" Nov 26 03:26:24.604: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [httpd] [It] [Slow] running a failing command without --restart=Never test/e2e/kubectl/kubectl.go:558 Nov 26 03:26:24.604: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-2995 run -i --image=registry.k8s.io/e2e-test-images/busybox:1.29-4 --restart=OnFailure --pod-running-timeout=2m0s failure-2 -- /bin/sh -c cat && exit 42' Nov 26 03:26:26.375: INFO: rc: 1 Nov 26 03:26:26.375: FAIL: Missing expected 'timed out' error, got: exec.CodeExitError{Err:(*errors.errorString)(0xc00542f4f0), Code:1} Full Stack Trace k8s.io/kubernetes/test/e2e/kubectl.glob..func1.8.7.5() test/e2e/kubectl/kubectl.go:567 +0x31e [AfterEach] Simple pod test/e2e/kubectl/kubectl.go:417 STEP: using delete to clean up resources 11/26/22 03:26:26.376 Nov 26 03:26:26.376: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-2995 delete --grace-period=0 --force -f -' Nov 26 03:26:26.654: INFO: stderr: "Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" Nov 26 03:26:26.654: INFO: stdout: "pod \"httpd\" force deleted\n" Nov 26 03:26:26.654: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-2995 get rc,svc -l name=httpd --no-headers' Nov 26 03:26:26.910: INFO: stderr: "No resources found in kubectl-2995 namespace.\n" Nov 26 03:26:26.910: INFO: stdout: "" Nov 26 03:26:26.910: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=kubectl-2995 get pods -l name=httpd -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' Nov 26 03:26:27.116: INFO: stderr: "" Nov 26 03:26:27.116: INFO: stdout: "" [AfterEach] [sig-cli] Kubectl client test/e2e/framework/node/init/init.go:32 Nov 26 03:26:27.116: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [DeferCleanup (Each)] [sig-cli] Kubectl client test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-cli] Kubectl client dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 03:26:27.159 STEP: Collecting events from namespace "kubectl-2995". 11/26/22 03:26:27.159 STEP: Found 11 events. 11/26/22 03:26:27.202 Nov 26 03:26:27.202: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for failure-2: { } Scheduled: Successfully assigned kubectl-2995/failure-2 to bootstrap-e2e-minion-group-h8k8 Nov 26 03:26:27.202: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for httpd: { } Scheduled: Successfully assigned kubectl-2995/httpd to bootstrap-e2e-minion-group-h8k8 Nov 26 03:26:27.202: INFO: At 2022-11-26 03:25:43 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-h8k8} Pulled: Container image "registry.k8s.io/e2e-test-images/httpd:2.4.38-4" already present on machine Nov 26 03:26:27.202: INFO: At 2022-11-26 03:25:43 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-h8k8} Created: Created container httpd Nov 26 03:26:27.202: INFO: At 2022-11-26 03:25:43 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-h8k8} Started: Started container httpd Nov 26 03:26:27.202: INFO: At 2022-11-26 03:25:43 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-h8k8} Killing: Stopping container httpd Nov 26 03:26:27.202: INFO: At 2022-11-26 03:25:44 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-h8k8} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Nov 26 03:26:27.202: INFO: At 2022-11-26 03:25:54 +0000 UTC - event for httpd: {kubelet bootstrap-e2e-minion-group-h8k8} BackOff: Back-off restarting failed container httpd in pod httpd_kubectl-2995(5eccc6f7-7e46-4f32-a79c-2cb59fa201da) Nov 26 03:26:27.202: INFO: At 2022-11-26 03:26:25 +0000 UTC - event for failure-2: {kubelet bootstrap-e2e-minion-group-h8k8} Pulled: Container image "registry.k8s.io/e2e-test-images/busybox:1.29-4" already present on machine Nov 26 03:26:27.202: INFO: At 2022-11-26 03:26:25 +0000 UTC - event for failure-2: {kubelet bootstrap-e2e-minion-group-h8k8} Created: Created container failure-2 Nov 26 03:26:27.202: INFO: At 2022-11-26 03:26:25 +0000 UTC - event for failure-2: {kubelet bootstrap-e2e-minion-group-h8k8} Started: Started container failure-2 Nov 26 03:26:27.243: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 03:26:27.243: INFO: failure-2 bootstrap-e2e-minion-group-h8k8 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:26:24 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:26:26 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:26:26 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:26:24 +0000 UTC }] Nov 26 03:26:27.243: INFO: Nov 26 03:26:27.286: INFO: Unable to fetch kubectl-2995/failure-2/failure-2 logs: an error on the server ("unknown") has prevented the request from succeeding (get pods failure-2) Nov 26 03:26:27.328: INFO: Logging node info for node bootstrap-e2e-master Nov 26 03:26:27.370: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master 6434c658-db87-4566-9960-c594435d7ea0 15625 0 2022-11-26 02:57:42 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:23:35 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:23:35 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:23:35 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:23:35 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:23:35 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.169.190,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5736e6f149167618f71cd530dafef4cc,SystemUUID:5736e6f1-4916-7618-f71c-d530dafef4cc,BootID:aec7342f-3939-425a-bcb1-13b86fb32845,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:26:27.371: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 03:26:27.415: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 03:26:27.457: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-master: error trying to reach service: No agent available Nov 26 03:26:27.457: INFO: Logging node info for node bootstrap-e2e-minion-group-7rps Nov 26 03:26:27.506: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-7rps d3085899-95ef-4dfa-ae30-feaa3b8cf547 15454 0 2022-11-26 02:57:40 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 
kubernetes.io/hostname:bootstrap-e2e-minion-group-7rps kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-7rps topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-859":"bootstrap-e2e-minion-group-7rps"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 03:22:10 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:22:10 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status} {node-problem-detector Update v1 2022-11-26 03:22:47 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-7rps,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:10 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:10 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:10 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:22:10 +0000 UTC,LastTransitionTime:2022-11-26 02:57:42 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.145.67.56,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:25dabe18cf1d9ba1fb46b48496913a34,SystemUUID:25dabe18-cf1d-9ba1-fb46-b48496913a34,BootID:cc4a1118-0d4a-44da-b481-07a484a9d681,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 
registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:26:27.506: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-7rps Nov 26 03:26:27.556: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-7rps Nov 26 03:26:27.599: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-7rps: error trying to reach service: No agent available Nov 26 03:26:27.599: INFO: Logging node info for node bootstrap-e2e-minion-group-8wdk Nov 26 03:26:27.640: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-8wdk 99654f41-ed96-4b06-a6c8-1db40d65e751 16672 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-8wdk kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-8wdk topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-2914":"bootstrap-e2e-minion-group-8wdk"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 03:19:24 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 03:22:51 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 03:25:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-8wdk,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:25:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:25:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:25:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:25:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.227.133,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7f88ac496457f212a2a8dc4997301551,SystemUUID:7f88ac49-6457-f212-a2a8-dc4997301551,BootID:691b96ec-62db-4c87-94b3-0915996e979c,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 
registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:26:27.641: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-8wdk Nov 26 03:26:27.685: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-8wdk Nov 26 03:26:27.727: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-8wdk: error trying to reach service: No agent available Nov 26 03:26:27.727: INFO: Logging node info for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:26:27.768: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-h8k8 d6f1c0c9-6047-409c-8381-ed8be6457456 
16045 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-h8k8 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-h8k8 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-742":"bootstrap-e2e-minion-group-h8k8"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:45 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 03:17:33 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 03:22:51 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 03:25:12 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-h8k8,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:22:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:48 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:48 +0000 
UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:22:48 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:22:48 +0000 UTC,LastTransitionTime:2022-11-26 02:57:45 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.177.45,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5d45caccbe73d6049c0c7b580c44ebb3,SystemUUID:5d45cacc-be73-d604-9c0c-7b580c44ebb3,BootID:f77edcc1-2ac4-4561-a5dd-42b928bec3f2,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:26:27.768: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:26:27.812: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-h8k8 Nov 26 03:26:27.854: INFO: Unable to retrieve kubelet pods for node bootstrap-e2e-minion-group-h8k8: error trying to reach service: No agent available [DeferCleanup (Each)] [sig-cli] Kubectl client tear down framework | framework.go:193 STEP: Destroying namespace "kubectl-2995" for this suite. 11/26/22 03:26:27.854
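Note: the node dumps above are the framework's standard debug output. Every node still reports Ready=True; only the "Logging pods the kubelet thinks is on node ... No agent available" step fails, which typically points at the API-server-to-kubelet proxy path rather than the nodes themselves. A minimal client-go sketch (not part of the e2e framework; the kubeconfig path is taken from the logs above, everything else is illustrative) that prints the same per-node condition summary:

// nodeconditions.go: list Ready/Pressure conditions for all nodes, similar to
// the NodeCondition entries dumped above. Illustrative sketch, not framework code.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Path used by the test run above; adjust for your environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	nodes, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		for _, c := range n.Status.Conditions {
			// e.g. "bootstrap-e2e-minion-group-7rps Ready=True (KubeletReady)"
			fmt.Printf("%s %s=%s (%s)\n", n.Name, c.Type, c.Status, c.Reason)
		}
	}
}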
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\shandle\supdates\sto\sExternalTrafficPolicy\sfield$'
test/e2e/framework/network/utils.go:834 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) test/e2e/framework/network/utils.go:834 +0x545 k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 +0x417 (from junit_01.xml)
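The trace above is parked inside the framework's readiness poll: NewNetworkingTestConfig -> setup -> createNetProxyPods -> WaitTimeoutForPodReadyInNamespace, which spins on wait.PollImmediate until the netserver pods report Ready. A minimal sketch of that polling pattern follows; the function name, the 2s/5m interval and timeout, and the client/ns/podName parameters are illustrative assumptions, not the framework's actual helper.

// waitForPodReady sketches the wait.PollImmediate readiness loop the stack
// trace is blocked in. Illustrative assumptions: function name, interval,
// timeout, and parameters.
import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForPodReady(client kubernetes.Interface, ns, podName string) error {
	return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		pod, err := client.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			return false, nil // keep polling through transient API errors
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil // pod is Running but not Ready yet, as logged below
	})
	// When the pod never becomes Ready within the window, PollImmediate
	// returns wait.ErrWaitTimeout and the surrounding step fails.
}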
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 03:02:58.648 Nov 26 03:02:58.648: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 03:02:58.649 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 03:02:58.849 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 03:02:58.98 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1250 [It] should handle updates to ExternalTrafficPolicy field test/e2e/network/loadbalancer.go:1480 STEP: creating a service esipp-3996/external-local-update with type=LoadBalancer 11/26/22 03:02:59.346 STEP: setting ExternalTrafficPolicy=Local 11/26/22 03:02:59.346 STEP: waiting for loadbalancer for service esipp-3996/external-local-update 11/26/22 03:02:59.92 Nov 26 03:02:59.920: INFO: Waiting up to 15m0s for service "external-local-update" to have a LoadBalancer STEP: creating a pod to be part of the service external-local-update 11/26/22 03:05:42.052 Nov 26 03:05:42.162: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 03:05:42.236: INFO: Found 0/1 pods - will retry Nov 26 03:05:44.291: INFO: Found all 1 pods Nov 26 03:05:44.291: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [external-local-update-j72qp] Nov 26 03:05:44.291: INFO: Waiting up to 2m0s for pod "external-local-update-j72qp" in namespace "esipp-3996" to be "running and ready" Nov 26 03:05:44.353: INFO: Pod "external-local-update-j72qp": Phase="Pending", Reason="", readiness=false. Elapsed: 62.768762ms Nov 26 03:05:44.353: INFO: Error evaluating pod condition running and ready: want pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' to be 'Running' but was 'Pending' Nov 26 03:05:46.409: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. Elapsed: 2.118248376s Nov 26 03:05:46.409: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:45 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:45 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:05:48.411: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. 
Elapsed: 4.120265074s Nov 26 03:05:48.411: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:05:50.491: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. Elapsed: 6.200604857s Nov 26 03:05:50.491: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:05:52.420: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. Elapsed: 8.12963249s Nov 26 03:05:52.420: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:05:54.495: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. Elapsed: 10.204106346s Nov 26 03:05:54.495: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:05:56.405: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. 
Elapsed: 12.114553078s Nov 26 03:05:56.405: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:05:58.448: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. Elapsed: 14.157516664s Nov 26 03:05:58.448: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:06:00.412: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. Elapsed: 16.121654449s Nov 26 03:06:00.412: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:06:02.402: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. Elapsed: 18.111845969s Nov 26 03:06:02.402: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:06:04.508: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=false. 
Elapsed: 20.217785127s Nov 26 03:06:04.508: INFO: Error evaluating pod condition running and ready: pod 'external-local-update-j72qp' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:47 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }] Nov 26 03:06:06.415: INFO: Pod "external-local-update-j72qp": Phase="Running", Reason="", readiness=true. Elapsed: 22.124657537s Nov 26 03:06:06.415: INFO: Pod "external-local-update-j72qp" satisfied condition "running and ready" Nov 26 03:06:06.415: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [external-local-update-j72qp] STEP: waiting for loadbalancer for service esipp-3996/external-local-update 11/26/22 03:06:06.415 Nov 26 03:06:06.415: INFO: Waiting up to 15m0s for service "external-local-update" to have a LoadBalancer STEP: turning ESIPP off 11/26/22 03:06:06.47 STEP: Performing setup for networking test in namespace esipp-3996 11/26/22 03:06:07.799 STEP: creating a selector 11/26/22 03:06:07.799 STEP: Creating the service pods in kubernetes 11/26/22 03:06:07.799 Nov 26 03:06:07.799: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable Nov 26 03:06:08.178: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "esipp-3996" to be "running and ready" Nov 26 03:06:08.247: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 68.454745ms Nov 26 03:06:08.247: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 26 03:06:10.313: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.134566581s Nov 26 03:06:10.313: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:12.301: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.122159728s Nov 26 03:06:12.301: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:14.308: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.129356329s Nov 26 03:06:14.308: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:16.302: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.124000917s Nov 26 03:06:16.302: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:18.347: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.168785s Nov 26 03:06:18.347: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:20.307: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.128884581s Nov 26 03:06:20.307: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:22.323: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.144922629s Nov 26 03:06:22.323: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:24.325: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 16.146802072s Nov 26 03:06:24.325: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:26.295: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.116942318s Nov 26 03:06:26.295: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:28.330: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.151757704s Nov 26 03:06:28.330: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:30.325: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 22.146351944s Nov 26 03:06:30.325: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:32.328: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 24.149563859s Nov 26 03:06:32.328: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:34.340: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 26.16202464s Nov 26 03:06:34.340: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:36.300: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 28.12125987s Nov 26 03:06:36.300: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:38.336: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 30.157349964s Nov 26 03:06:38.336: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:40.313: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 32.134983533s Nov 26 03:06:40.313: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:42.301: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 34.122369211s Nov 26 03:06:42.301: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:44.347: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 36.168352915s Nov 26 03:06:44.347: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:46.314: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 38.135692379s Nov 26 03:06:46.314: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:48.375: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 40.196467899s Nov 26 03:06:48.375: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:50.329: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 42.150677184s Nov 26 03:06:50.329: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:52.299: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 44.120990805s Nov 26 03:06:52.299: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:54.305: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 46.126401998s Nov 26 03:06:54.305: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:56.313: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 48.135002002s Nov 26 03:06:56.313: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:58.366: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 50.187563078s Nov 26 03:06:58.366: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:00.330: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 52.151997781s Nov 26 03:07:00.330: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:02.306: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 54.127647144s Nov 26 03:07:02.306: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:04.308: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 56.130000598s Nov 26 03:07:04.308: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:06.312: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 58.133921503s Nov 26 03:07:06.312: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:08.374: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m0.195714259s Nov 26 03:07:08.374: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:10.608: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m2.429149354s Nov 26 03:07:10.608: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:38.323: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m30.144110919s Nov 26 03:07:38.323: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:40.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m32.109900224s Nov 26 03:07:40.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:42.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m34.110118949s Nov 26 03:07:42.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:44.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m36.109766336s Nov 26 03:07:44.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:46.296: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m38.117274954s Nov 26 03:07:46.296: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:48.310: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m40.131539063s Nov 26 03:07:48.310: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:50.291: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m42.112190646s Nov 26 03:07:50.291: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:52.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m44.110037076s Nov 26 03:07:52.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:54.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m46.1102146s Nov 26 03:07:54.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:56.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m48.109609226s Nov 26 03:07:56.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:58.321: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 1m50.142623216s Nov 26 03:07:58.321: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 5m0.62s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 5m0.001s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 1m51.469s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc000d8e4c8, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76095, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76095?, 0xc000fd5420?}, {0xc000ee5c20?, 0xc003b75200?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:08:00.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m52.110862775s Nov 26 03:08:00.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:02.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m54.110325585s Nov 26 03:08:02.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:04.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 1m56.110131908s Nov 26 03:08:04.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:06.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m58.110532606s Nov 26 03:08:06.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:08.310: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m0.132039372s Nov 26 03:08:08.310: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:10.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m2.109843154s Nov 26 03:08:10.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:12.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m4.111022918s Nov 26 03:08:12.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:14.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m6.11026878s Nov 26 03:08:14.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:16.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m8.111049863s Nov 26 03:08:16.290: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:18.311: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m10.132806104s Nov 26 03:08:18.311: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 5m20.622s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 5m20.003s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 2m11.471s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc000d8e4c8, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76095, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76095?, 0xc000fd5420?}, {0xc000ee5c20?, 0xc003b75200?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:08:20.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m12.110681813s Nov 26 03:08:20.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:22.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m14.109837256s Nov 26 03:08:22.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:24.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m16.109857682s Nov 26 03:08:24.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:26.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m18.11001023s Nov 26 03:08:26.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:28.316: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m20.137831426s Nov 26 03:08:28.316: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:30.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m22.110445081s Nov 26 03:08:30.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:32.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m24.110978043s Nov 26 03:08:32.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:34.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m26.110645619s Nov 26 03:08:34.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:36.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m28.109771766s Nov 26 03:08:36.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:38.317: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m30.138824419s Nov 26 03:08:38.317: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 5m40.625s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 5m40.006s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 2m31.474s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc000d8e4c8, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76095, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76095?, 0xc000fd5420?}, {0xc000ee5c20?, 0xc003b75200?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:08:40.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m32.109484123s Nov 26 03:08:40.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:42.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m34.110095375s Nov 26 03:08:42.289: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:44.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m36.109724117s Nov 26 03:08:44.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:46.289: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m38.111052642s Nov 26 03:08:46.290: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:48.316: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m40.138017252s Nov 26 03:08:48.316: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:50.288: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m42.109670304s Nov 26 03:08:50.288: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:52.317: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m44.138997737s Nov 26 03:08:52.317: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:54.338: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 2m46.159093186s Nov 26 03:08:54.338: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:56.301: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m48.122809889s Nov 26 03:08:56.301: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:58.366: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m50.187689082s Nov 26 03:08:58.366: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 6m0.627s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 6m0.008s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 2m51.476s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc000d8e4c8, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76095, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76095?, 0xc000fd5420?}, {0xc000ee5c20?, 0xc003b75200?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:09:00.354: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 2m52.1755763s Nov 26 03:09:00.354: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:02.308: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m54.130050975s Nov 26 03:09:02.309: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:04.310: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m56.131486793s Nov 26 03:09:04.310: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:06.307: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m58.128224787s Nov 26 03:09:06.307: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:08.331: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m0.152300262s Nov 26 03:09:08.331: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:10.301: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m2.122618526s Nov 26 03:09:10.301: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:12.290: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m4.11158283s Nov 26 03:09:12.290: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:14.301: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m6.123035406s Nov 26 03:09:14.301: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:16.329: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m8.150156715s Nov 26 03:09:16.329: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:18.347: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m10.168271707s Nov 26 03:09:18.347: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 6m20.63s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 6m20.011s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 3m11.479s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc000d8e4c8, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76095, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76095?, 0xc000fd5420?}, {0xc000ee5c20?, 0xc003b75200?}, 0x271e5fe?) 
test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:09:20.304: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m12.125587871s Nov 26 03:09:20.304: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:22.306: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m14.127337688s Nov 26 03:09:22.306: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:24.335: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m16.15672318s Nov 26 03:09:24.335: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:26.296: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m18.117995799s Nov 26 03:09:26.296: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:28.337: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m20.159019005s Nov 26 03:09:28.337: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:30.313: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m22.134878676s Nov 26 03:09:30.313: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:32.316: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m24.137495802s Nov 26 03:09:32.316: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:34.347: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m26.168903131s Nov 26 03:09:34.347: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:36.308: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m28.129697787s Nov 26 03:09:36.308: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:38.331: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 3m30.152629551s Nov 26 03:09:38.331: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 6m40.632s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 6m40.013s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 3m31.481s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc000d8e4c8, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76095, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76095?, 0xc000fd5420?}, {0xc000ee5c20?, 0xc003b75200?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:09:40.322: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 3m32.143727203s Nov 26 03:09:40.322: INFO: The phase of Pod netserver-0 is Running (Ready = true) Nov 26 03:09:40.322: INFO: Pod "netserver-0" satisfied condition "running and ready" Nov 26 03:09:40.425: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "esipp-3996" to be "running and ready" Nov 26 03:09:40.584: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. 
Elapsed: 159.407076ms Nov 26 03:09:40.584: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:09:42.636: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2.2117059s Nov 26 03:09:42.637: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:09:44.654: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 4.229548217s Nov 26 03:09:44.654: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:09:46.663: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 6.23795944s Nov 26 03:09:46.663: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:09:48.650: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 8.225428298s Nov 26 03:09:48.650: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:09:50.651: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 10.226275609s Nov 26 03:09:50.651: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:09:52.677: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 12.252580103s Nov 26 03:09:52.677: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:09:54.634: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 14.208899478s Nov 26 03:09:54.634: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:09:56.669: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 16.244231338s Nov 26 03:09:56.669: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:09:58.666: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 18.241618s Nov 26 03:09:58.666: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 7m0.635s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 7m0.016s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 3m51.484s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc00345de00, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76313, 0xb}, {0x75ee704, 0x11}, 0xc000f32aa0?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76313?, 0x0?}, {0xc000ee5c20?, 0x0?}, 0xc000ebf0e0?) 
test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:10:00.638: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 20.213576449s Nov 26 03:10:00.638: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:02.678: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 22.253043419s Nov 26 03:10:02.678: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:04.649: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 24.224727263s Nov 26 03:10:04.650: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:06.636: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 26.211615613s Nov 26 03:10:06.636: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:08.643: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 28.218543749s Nov 26 03:10:08.643: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:10.638: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 30.213672974s Nov 26 03:10:10.638: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:12.641: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 32.216241755s Nov 26 03:10:12.641: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:14.695: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 34.270096522s Nov 26 03:10:14.695: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:16.634: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 36.209342469s Nov 26 03:10:16.634: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:18.640: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. 
Elapsed: 38.214859635s Nov 26 03:10:18.640: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 7m20.638s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 7m20.019s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 4m11.486s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc00345de00, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76313, 0xb}, {0x75ee704, 0x11}, 0xc000f32aa0?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76313?, 0x0?}, {0xc000ee5c20?, 0x0?}, 0xc000ebf0e0?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:10:20.648: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 40.22299858s Nov 26 03:10:20.648: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:22.649: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 42.22436432s Nov 26 03:10:22.649: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:24.643: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. 
Elapsed: 44.218198896s Nov 26 03:10:24.643: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:26.663: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 46.238178952s Nov 26 03:10:26.663: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:28.640: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 48.215581732s Nov 26 03:10:28.640: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:30.653: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 50.227966934s Nov 26 03:10:30.653: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:32.643: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 52.218413737s Nov 26 03:10:32.643: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:34.642: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 54.217123463s Nov 26 03:10:34.642: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:36.644: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 56.219557513s Nov 26 03:10:36.644: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:38.664: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 58.239245582s Nov 26 03:10:38.664: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 7m40.64s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 7m40.021s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 4m31.489s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc00345de00, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76313, 0xb}, {0x75ee704, 0x11}, 0xc000f32aa0?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76313?, 0x0?}, {0xc000ee5c20?, 0x0?}, 0xc000ebf0e0?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:10:40.637: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m0.212395673s Nov 26 03:10:40.637: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:42.661: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m2.236635101s Nov 26 03:10:42.661: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:44.645: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m4.220677762s Nov 26 03:10:44.645: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:46.657: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m6.232244281s Nov 26 03:10:46.657: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:48.685: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m8.259863989s Nov 26 03:10:48.685: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:50.653: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m10.228081918s Nov 26 03:10:50.653: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:52.643: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m12.218330123s Nov 26 03:10:52.643: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:54.657: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m14.232680135s Nov 26 03:10:54.657: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:56.684: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m16.259369759s Nov 26 03:10:56.684: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:10:58.647: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m18.221804975s Nov 26 03:10:58.647: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 8m0.642s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 8m0.023s) test/e2e/network/loadbalancer.go:1480 At [By Step] Creating the service pods in kubernetes (Step Runtime: 4m51.491s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc00345de00, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x68?, 0x2fd9d05?, 0x70?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc003b74fb8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0xc002b76313, 0xb}, {0x75ee704, 0x11}, 0xc000f32aa0?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc000aa96c0?}, {0xc002b76313?, 0x0?}, {0xc000ee5c20?, 0x0?}, 0xc000ebf0e0?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000fd28c0, {0x75c6f7c, 0x9}, 0xc002cc0960) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000fd28c0, 0x7fe1ac3668e0?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:11:00.645: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 1m20.220270292s Nov 26 03:11:00.645: INFO: The phase of Pod netserver-1 is Running (Ready = true) Nov 26 03:11:00.645: INFO: Pod "netserver-1" satisfied condition "running and ready" Nov 26 03:11:00.742: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "esipp-3996" to be "running and ready" Nov 26 03:11:00.822: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 79.692348ms Nov 26 03:11:00.822: INFO: The phase of Pod netserver-2 is Running (Ready = true) Nov 26 03:11:00.822: INFO: Pod "netserver-2" satisfied condition "running and ready" STEP: Creating test pods 11/26/22 03:11:00.893 Nov 26 03:11:01.000: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "esipp-3996" to be "running" Nov 26 03:11:01.071: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 70.897421ms Nov 26 03:11:03.142: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.141653087s Nov 26 03:11:03.142: INFO: Pod "test-container-pod" satisfied condition "running" Nov 26 03:11:03.252: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 STEP: Getting node addresses 11/26/22 03:11:03.252 Nov 26 03:11:03.252: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable STEP: Creating the service on top of the pods in kubernetes 11/26/22 03:11:03.459 Nov 26 03:11:03.578: INFO: Service node-port-service in namespace esipp-3996 found. Nov 26 03:11:03.728: INFO: Service session-affinity-service in namespace esipp-3996 found. STEP: Waiting for NodePort service to expose endpoint 11/26/22 03:11:03.77 Nov 26 03:11:04.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:05.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:06.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:07.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:08.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:09.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:10.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:11.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:12.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:13.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:14.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:15.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:16.772: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:17.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:18.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 8m20.645s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 8m20.026s) test/e2e/network/loadbalancer.go:1480 At [By Step] Waiting for NodePort service to expose endpoint (Step Runtime: 15.522s) test/e2e/framework/network/utils.go:832 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc002571638, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x0?, 0x2fd9d05?, 0x38?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc0000820c8}, 0x754e980?, 0xc003b75550?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x0?, 0x0?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 k8s.io/kubernetes/test/e2e/framework.WaitForServiceEndpointsNum({0x801de88?, 0xc000aa96c0}, {0xc000ee5c20, 0xa}, {0x75ee1b4, 0x11}, 0x3, 0x0?, 0x7fe1d5427a68?) test/e2e/framework/util.go:424 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) 
test/e2e/framework/network/utils.go:833 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:11:19.772: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:20.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:21.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:22.772: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:23.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:24.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:25.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:26.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:27.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:28.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:29.772: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:30.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:31.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:32.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:33.771: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:33.840: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:11:33.884: INFO: Unexpected error: failed to validate endpoints for service node-port-service in namespace: esipp-3996: <*errors.errorString | 0xc000193d60>: { s: "timed out waiting for the condition", } Nov 26 03:11:33.885: FAIL: failed to validate endpoints for service node-port-service in namespace: esipp-3996: timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) 
test/e2e/framework/network/utils.go:834 +0x545 k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 +0x417 Nov 26 03:11:33.995: INFO: Waiting up to 15m0s for service "external-local-update" to have no LoadBalancer ------------------------------ Progress Report for Ginkgo Process #18 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field (Spec Runtime: 8m40.647s) test/e2e/network/loadbalancer.go:1480 In [It] (Node Runtime: 8m40.028s) test/e2e/network/loadbalancer.go:1480 At [By Step] Waiting for NodePort service to expose endpoint (Step Runtime: 35.525s) test/e2e/framework/network/utils.go:832 Spec Goroutine goroutine 1550 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc001e6bc98, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x88?, 0x2fd9d05?, 0x48?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc003b74ee8?, 0xc003b74ed8?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:460 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.Poll(0x7ffc5eae6502?, 0xa?, 0x7fe0bc8?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:445 k8s.io/kubernetes/test/e2e/framework/providers/gce.(*Provider).EnsureLoadBalancerResourcesDeleted(0xc000e2a348, {0xc000ee4500, 0xe}, {0x77c6ae2, 0x2}) test/e2e/framework/providers/gce/gce.go:195 k8s.io/kubernetes/test/e2e/framework.EnsureLoadBalancerResourcesDeleted(...) test/e2e/framework/util.go:551 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancerDestroy.func1() test/e2e/framework/service/jig.go:602 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).WaitForLoadBalancerDestroy(0xc0017d9040, {0xc000ee4500?, 0x0?}, 0x0?, 0x0?) test/e2e/framework/service/jig.go:614 k8s.io/kubernetes/test/e2e/framework/service.(*TestJig).ChangeServiceType(0x0?, {0x75c5095?, 0x0?}, 0x0?) test/e2e/framework/service/jig.go:186 > k8s.io/kubernetes/test/e2e/network.glob..func20.7.1() test/e2e/network/loadbalancer.go:1494 panic({0x70eb7e0, 0xc0004f05b0}) /usr/local/go/src/runtime/panic.go:884 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2.Fail({0xc00008e510, 0x8d}, {0xc003b754b0?, 0x75b521a?, 0xc003b754d0?}) vendor/github.com/onsi/ginkgo/v2/core_dsl.go:352 k8s.io/kubernetes/test/e2e/framework.Fail({0xc001407100, 0x78}, {0xc003b75548?, 0x76740e9?, 0xc003b75570?}) test/e2e/framework/log.go:61 k8s.io/kubernetes/test/e2e/framework.ExpectNoErrorWithOffset(0x1, {0x7fa3ee0, 0xc000193d60}, {0xc000eecae0?, 0x75ee1b4?, 0x11?}) test/e2e/framework/expect.go:76 k8s.io/kubernetes/test/e2e/framework.ExpectNoError(...) test/e2e/framework/expect.go:43 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000fd28c0, 0x3c?) 
test/e2e/framework/network/utils.go:834 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001202000, {0x0, 0x0, 0x7f8f6d0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.7() test/e2e/network/loadbalancer.go:1544 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc001ebea80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 03:11:44.256: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 Nov 26 03:11:44.302: INFO: Output of kubectl describe svc: Nov 26 03:11:44.303: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-3996 describe svc --namespace=esipp-3996' Nov 26 03:11:44.923: INFO: stderr: "" Nov 26 03:11:44.923: INFO: stdout: "Name: external-local-update\nNamespace: esipp-3996\nLabels: testid=external-local-update-66061276-2396-4efc-a8c9-bccdf87142db\nAnnotations: <none>\nSelector: testid=external-local-update-66061276-2396-4efc-a8c9-bccdf87142db\nType: ClusterIP\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.193.62\nIPs: 10.0.193.62\nPort: <unset> 80/TCP\nTargetPort: 80/TCP\nEndpoints: \nSession Affinity: None\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal EnsuringLoadBalancer 8m8s service-controller Ensuring load balancer\n Normal ExternalTrafficPolicy 5m38s service-controller Local -> Cluster\n Normal EnsuringLoadBalancer 5m21s (x2 over 6m7s) service-controller Ensuring load balancer\n Normal EnsuredLoadBalancer 4m32s (x2 over 6m3s) service-controller Ensured load balancer\n Normal EnsuringLoadBalancer 2m57s service-controller Ensuring load balancer\n Normal EnsuredLoadBalancer 2m53s service-controller Ensured load balancer\n\n\nName: node-port-service\nNamespace: esipp-3996\nLabels: <none>\nAnnotations: <none>\nSelector: selector-14210ba8-37d2-4af2-a74c-4b8cbd15faec=true\nType: NodePort\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.1.32\nIPs: 10.0.1.32\nPort: http 80/TCP\nTargetPort: 8083/TCP\nNodePort: http 30478/TCP\nEndpoints: <none>\nPort: udp 90/UDP\nTargetPort: 8081/UDP\nNodePort: udp 31537/UDP\nEndpoints: <none>\nSession Affinity: None\nExternal Traffic Policy: Cluster\nEvents: <none>\n\n\nName: session-affinity-service\nNamespace: esipp-3996\nLabels: <none>\nAnnotations: <none>\nSelector: selector-14210ba8-37d2-4af2-a74c-4b8cbd15faec=true\nType: NodePort\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.170.115\nIPs: 10.0.170.115\nPort: http 80/TCP\nTargetPort: 8083/TCP\nNodePort: http 31338/TCP\nEndpoints: <none>\nPort: udp 90/UDP\nTargetPort: 8081/UDP\nNodePort: udp 31621/UDP\nEndpoints: <none>\nSession Affinity: ClientIP\nExternal Traffic Policy: Cluster\nEvents: <none>\n" Nov 26 03:11:44.923: INFO: Name: external-local-update Namespace: esipp-3996 Labels: testid=external-local-update-66061276-2396-4efc-a8c9-bccdf87142db Annotations: <none> Selector: 
testid=external-local-update-66061276-2396-4efc-a8c9-bccdf87142db Type: ClusterIP IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.193.62 IPs: 10.0.193.62 Port: <unset> 80/TCP TargetPort: 80/TCP Endpoints: Session Affinity: None Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal EnsuringLoadBalancer 8m8s service-controller Ensuring load balancer Normal ExternalTrafficPolicy 5m38s service-controller Local -> Cluster Normal EnsuringLoadBalancer 5m21s (x2 over 6m7s) service-controller Ensuring load balancer Normal EnsuredLoadBalancer 4m32s (x2 over 6m3s) service-controller Ensured load balancer Normal EnsuringLoadBalancer 2m57s service-controller Ensuring load balancer Normal EnsuredLoadBalancer 2m53s service-controller Ensured load balancer Name: node-port-service Namespace: esipp-3996 Labels: <none> Annotations: <none> Selector: selector-14210ba8-37d2-4af2-a74c-4b8cbd15faec=true Type: NodePort IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.1.32 IPs: 10.0.1.32 Port: http 80/TCP TargetPort: 8083/TCP NodePort: http 30478/TCP Endpoints: <none> Port: udp 90/UDP TargetPort: 8081/UDP NodePort: udp 31537/UDP Endpoints: <none> Session Affinity: None External Traffic Policy: Cluster Events: <none> Name: session-affinity-service Namespace: esipp-3996 Labels: <none> Annotations: <none> Selector: selector-14210ba8-37d2-4af2-a74c-4b8cbd15faec=true Type: NodePort IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.170.115 IPs: 10.0.170.115 Port: http 80/TCP TargetPort: 8083/TCP NodePort: http 31338/TCP Endpoints: <none> Port: udp 90/UDP TargetPort: 8081/UDP NodePort: udp 31621/UDP Endpoints: <none> Session Affinity: ClientIP External Traffic Policy: Cluster Events: <none> [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 03:11:44.923 STEP: Collecting events from namespace "esipp-3996". 11/26/22 03:11:44.924 STEP: Found 40 events. 
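The describe output above shows node-port-service and session-affinity-service with "Endpoints: <none>", which is exactly what the failed "Waiting for amount of service:node-port-service endpoints to be 3" loop was polling for before it reported "timed out waiting for the condition". As a rough sketch of that polling pattern (the framework's WaitForServiceEndpointsNum goes through wait.Poll per the goroutine dump above; the interval, timeout, and kubeconfig path below are illustrative assumptions, not the framework's actual values):

// Sketch only: poll the Endpoints object for node-port-service until it
// lists the expected number of addresses, mirroring the wait that timed out.
// Namespace and service name are taken from this run; everything else is assumed.
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// waitForEndpointCount returns nil once the Endpoints object reports `want`
// addresses, or the wait package's "timed out waiting for the condition"
// error after the timeout, which is the error string seen in the failure above.
func waitForEndpointCount(cs kubernetes.Interface, ns, svc string, want int) error {
	return wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
		ep, err := cs.CoreV1().Endpoints(ns).Get(context.TODO(), svc, metav1.GetOptions{})
		if err != nil {
			return false, nil // not created yet or transient error: keep polling
		}
		got := 0
		for _, subset := range ep.Subsets {
			got += len(subset.Addresses)
		}
		return got == want, nil
	})
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	// The failing step waited for 3 ready endpoints behind node-port-service.
	if err := waitForEndpointCount(cs, "esipp-3996", "node-port-service", 3); err != nil {
		fmt.Println("endpoints never reached the expected count:", err)
	}
}

In this run the count never reached 3 within the step's window, so the endpoints stayed empty when the AfterEach block dumped the services above.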
11/26/22 03:11:44.969 Nov 26 03:11:44.969: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for test-container-pod: { } Scheduled: Successfully assigned esipp-3996/test-container-pod to bootstrap-e2e-minion-group-8wdk Nov 26 03:11:44.970: INFO: At 2022-11-26 03:03:36 +0000 UTC - event for external-local-update: {service-controller } EnsuringLoadBalancer: Ensuring load balancer Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:37 +0000 UTC - event for external-local-update: {service-controller } EnsuringLoadBalancer: Ensuring load balancer Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:41 +0000 UTC - event for external-local-update: {service-controller } EnsuredLoadBalancer: Ensured load balancer Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:42 +0000 UTC - event for external-local-update: {replication-controller } SuccessfulCreate: Created pod: external-local-update-j72qp Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:42 +0000 UTC - event for external-local-update-j72qp: {default-scheduler } Scheduled: Successfully assigned esipp-3996/external-local-update-j72qp to bootstrap-e2e-minion-group-7rps Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:43 +0000 UTC - event for external-local-update-j72qp: {kubelet bootstrap-e2e-minion-group-7rps} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:43 +0000 UTC - event for external-local-update-j72qp: {kubelet bootstrap-e2e-minion-group-7rps} Created: Created container netexec Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:43 +0000 UTC - event for external-local-update-j72qp: {kubelet bootstrap-e2e-minion-group-7rps} Started: Started container netexec Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:44 +0000 UTC - event for external-local-update-j72qp: {kubelet bootstrap-e2e-minion-group-7rps} Killing: Stopping container netexec Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:45 +0000 UTC - event for external-local-update-j72qp: {kubelet bootstrap-e2e-minion-group-7rps} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:05:48 +0000 UTC - event for external-local-update-j72qp: {kubelet bootstrap-e2e-minion-group-7rps} BackOff: Back-off restarting failed container netexec in pod external-local-update-j72qp_esipp-3996(617c8084-b507-4dee-8bd8-5ebef9c9a7a4)
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:06 +0000 UTC - event for external-local-update: {service-controller } ExternalTrafficPolicy: Local -> Cluster
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:08 +0000 UTC - event for netserver-0: {default-scheduler } Scheduled: Successfully assigned esipp-3996/netserver-0 to bootstrap-e2e-minion-group-7rps
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:08 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:08 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Created: Created container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:08 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Started: Started container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:08 +0000 UTC - event for netserver-1: {default-scheduler } Scheduled: Successfully assigned esipp-3996/netserver-1 to bootstrap-e2e-minion-group-8wdk
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:08 +0000 UTC - event for netserver-2: {default-scheduler } Scheduled: Successfully assigned esipp-3996/netserver-2 to bootstrap-e2e-minion-group-h8k8
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:09 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} FailedMount: MountVolume.SetUp failed for volume "kube-api-access-6x462" : failed to sync configmap cache: timed out waiting for the condition
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:09 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:09 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Started: Started container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:09 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Created: Created container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:10 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Killing: Stopping container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:10 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Started: Started container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:10 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Created: Created container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:10 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:10 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Killing: Stopping container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:11 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:11 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:13 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Killing: Stopping container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:13 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} BackOff: Back-off restarting failed container webserver in pod netserver-2_esipp-3996(70eb3552-b7d6-43d1-b701-8e16d0491026)
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:14 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} BackOff: Back-off restarting failed container webserver in pod netserver-0_esipp-3996(bde1426a-e2eb-4aa1-89d7-c1f83706c68f)
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:14 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} SandboxChanged: Pod sandbox changed, it will be killed and re-created.
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:06:30 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} BackOff: Back-off restarting failed container webserver in pod netserver-1_esipp-3996(0583a101-4bca-470e-b81b-cc141d53a2a8)
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:08:47 +0000 UTC - event for external-local-update: {service-controller } EnsuringLoadBalancer: Ensuring load balancer
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:08:51 +0000 UTC - event for external-local-update: {service-controller } EnsuredLoadBalancer: Ensured load balancer
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:11:01 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-8wdk} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:11:01 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-8wdk} Created: Created container webserver
Nov 26 03:11:44.970: INFO: At 2022-11-26 03:11:01 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-8wdk} Started: Started container webserver
Nov 26 03:11:45.014: INFO: POD NODE PHASE GRACE CONDITIONS
Nov 26 03:11:45.014: INFO: external-local-update-j72qp bootstrap-e2e-minion-group-7rps Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:12 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:12 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:05:42 +0000 UTC }]
Nov 26 03:11:45.014: INFO: netserver-0 bootstrap-e2e-minion-group-7rps Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:07 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:09:38 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:09:38 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:07 +0000 UTC }]
Nov 26 03:11:45.014: INFO: netserver-1 bootstrap-e2e-minion-group-8wdk Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:08 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:10:59 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:10:59 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:08 +0000 UTC }]
Nov 26 03:11:45.014: INFO: netserver-2
bootstrap-e2e-minion-group-h8k8 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:08 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:12 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:12 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:08 +0000 UTC }] Nov 26 03:11:45.014: INFO: test-container-pod bootstrap-e2e-minion-group-8wdk Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:00 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:02 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:02 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:00 +0000 UTC }] Nov 26 03:11:45.014: INFO: Nov 26 03:11:45.295: INFO: Logging node info for node bootstrap-e2e-master Nov 26 03:11:45.337: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master 6434c658-db87-4566-9960-c594435d7ea0 8115 0 2022-11-26 02:57:42 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:08:15 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:08:15 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:08:15 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:08:15 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:08:15 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.169.190,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5736e6f149167618f71cd530dafef4cc,SystemUUID:5736e6f1-4916-7618-f71c-d530dafef4cc,BootID:aec7342f-3939-425a-bcb1-13b86fb32845,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:11:45.337: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 03:11:45.382: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 03:11:45.437: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.438: INFO: Container konnectivity-server-container ready: true, restart count 3 Nov 26 03:11:45.438: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.438: INFO: Container kube-controller-manager ready: false, restart count 4 Nov 26 03:11:45.438: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.438: INFO: Container etcd-container ready: true, restart count 2 Nov 26 03:11:45.438: INFO: 
kube-scheduler-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.438: INFO: Container kube-scheduler ready: true, restart count 1 Nov 26 03:11:45.438: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-26 02:57:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.438: INFO: Container l7-lb-controller ready: true, restart count 6 Nov 26 03:11:45.438: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.438: INFO: Container kube-apiserver ready: true, restart count 0 Nov 26 03:11:45.438: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.438: INFO: Container etcd-container ready: true, restart count 0 Nov 26 03:11:45.438: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-26 02:57:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.438: INFO: Container kube-addon-manager ready: true, restart count 0 Nov 26 03:11:45.438: INFO: metadata-proxy-v0.1-zcw5j started at 2022-11-26 02:57:48 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:45.438: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:11:45.438: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:11:45.625: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 03:11:45.625: INFO: Logging node info for node bootstrap-e2e-minion-group-7rps Nov 26 03:11:45.667: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-7rps d3085899-95ef-4dfa-ae30-feaa3b8cf547 9629 0 2022-11-26 02:57:40 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-7rps kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-7rps topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1869":"bootstrap-e2e-minion-group-7rps"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {node-problem-detector Update v1 2022-11-26 03:07:46 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-26 03:10:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:10:52 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-7rps,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning 
properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:04 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:04 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:04 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:10:04 +0000 UTC,LastTransitionTime:2022-11-26 02:57:42 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.145.67.56,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:25dabe18cf1d9ba1fb46b48496913a34,SystemUUID:25dabe18-cf1d-9ba1-fb46-b48496913a34,BootID:cc4a1118-0d4a-44da-b481-07a484a9d681,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 
registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:11:45.667: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-7rps Nov 26 03:11:45.711: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-7rps Nov 26 03:11:45.769: INFO: netserver-0 started at 2022-11-26 03:06:07 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.769: INFO: Container webserver ready: true, restart count 5 Nov 26 03:11:45.769: INFO: kube-proxy-bootstrap-e2e-minion-group-7rps started at 2022-11-26 02:57:40 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.769: INFO: Container kube-proxy ready: false, restart count 6 Nov 26 03:11:45.769: INFO: csi-mockplugin-0 started at 2022-11-26 03:01:56 +0000 UTC (0+3 container statuses recorded) Nov 26 03:11:45.769: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 03:11:45.769: INFO: Container driver-registrar ready: false, restart count 4 Nov 26 03:11:45.769: INFO: Container mock ready: false, restart count 4 Nov 26 03:11:45.769: INFO: konnectivity-agent-q9wlj started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.769: INFO: Container konnectivity-agent ready: true, restart count 6 Nov 26 03:11:45.769: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:02:54 +0000 UTC (0+7 container statuses recorded) Nov 26 03:11:45.769: INFO: Container csi-attacher ready: true, restart count 2 Nov 26 03:11:45.769: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 03:11:45.769: INFO: Container csi-resizer ready: true, restart count 2 Nov 26 03:11:45.769: INFO: Container csi-snapshotter ready: true, restart count 2 Nov 26 03:11:45.769: INFO: Container hostpath ready: true, restart count 2 Nov 26 03:11:45.769: INFO: Container liveness-probe ready: true, restart count 2 Nov 26 03:11:45.769: INFO: Container node-driver-registrar ready: true, restart 
count 2 Nov 26 03:11:45.769: INFO: external-local-update-j72qp started at 2022-11-26 03:05:42 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.769: INFO: Container netexec ready: true, restart count 5 Nov 26 03:11:45.769: INFO: metrics-server-v0.5.2-867b8754b9-zrdj7 started at 2022-11-26 02:58:13 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:45.769: INFO: Container metrics-server ready: false, restart count 5 Nov 26 03:11:45.769: INFO: Container metrics-server-nanny ready: false, restart count 6 Nov 26 03:11:45.769: INFO: pod-d160ca42-832c-4ddf-b18b-b64476b424a2 started at 2022-11-26 03:08:50 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.769: INFO: Container write-pod ready: false, restart count 0 Nov 26 03:11:45.769: INFO: lb-sourcerange-6wkl4 started at 2022-11-26 03:10:28 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.769: INFO: Container netexec ready: true, restart count 1 Nov 26 03:11:45.769: INFO: metadata-proxy-v0.1-swmbn started at 2022-11-26 02:57:41 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:45.769: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:11:45.769: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:11:45.769: INFO: netserver-0 started at 2022-11-26 03:06:20 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:45.769: INFO: Container webserver ready: false, restart count 5 Nov 26 03:11:45.987: INFO: Latency metrics for node bootstrap-e2e-minion-group-7rps Nov 26 03:11:45.987: INFO: Logging node info for node bootstrap-e2e-minion-group-8wdk Nov 26 03:11:46.029: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-8wdk 99654f41-ed96-4b06-a6c8-1db40d65e751 9271 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-8wdk kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-8wdk topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-4172":"bootstrap-e2e-minion-group-8wdk"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 03:01:23 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 03:07:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 03:09:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-8wdk,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not 
read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:09:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:09:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:09:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:09:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.227.133,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7f88ac496457f212a2a8dc4997301551,SystemUUID:7f88ac49-6457-f212-a2a8-dc4997301551,BootID:691b96ec-62db-4c87-94b3-0915996e979c,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:11:46.030: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-8wdk Nov 26 03:11:46.073: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-8wdk Nov 26 03:11:46.132: INFO: metadata-proxy-v0.1-fzfwr started at 2022-11-26 02:57:45 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:46.132: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:11:46.132: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:11:46.132: INFO: execpod-dropjvg4x started at 2022-11-26 03:10:25 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container agnhost-container ready: false, restart count 1 Nov 26 03:11:46.133: INFO: external-local-nodeport-mp5bd started at 2022-11-26 03:06:17 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container netexec ready: true, restart count 1 Nov 26 03:11:46.133: INFO: netserver-1 started at 2022-11-26 
03:06:20 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container webserver ready: false, restart count 5 Nov 26 03:11:46.133: INFO: mutability-test-dpfps started at 2022-11-26 03:09:19 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container netexec ready: true, restart count 2 Nov 26 03:11:46.133: INFO: csi-mockplugin-0 started at 2022-11-26 03:08:54 +0000 UTC (0+3 container statuses recorded) Nov 26 03:11:46.133: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 03:11:46.133: INFO: Container driver-registrar ready: true, restart count 2 Nov 26 03:11:46.133: INFO: Container mock ready: true, restart count 2 Nov 26 03:11:46.133: INFO: kube-dns-autoscaler-5f6455f985-54xp5 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container autoscaler ready: false, restart count 6 Nov 26 03:11:46.133: INFO: coredns-6d97d5ddb-6l6bj started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container coredns ready: false, restart count 6 Nov 26 03:11:46.133: INFO: konnectivity-agent-v4t28 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container konnectivity-agent ready: false, restart count 6 Nov 26 03:11:46.133: INFO: test-container-pod started at 2022-11-26 03:11:00 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container webserver ready: true, restart count 0 Nov 26 03:11:46.133: INFO: net-tiers-svc-b29kg started at 2022-11-26 03:06:52 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container netexec ready: true, restart count 1 Nov 26 03:11:46.133: INFO: external-provisioner-7fh56 started at 2022-11-26 03:07:05 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container nfs-provisioner ready: false, restart count 4 Nov 26 03:11:46.133: INFO: kube-proxy-bootstrap-e2e-minion-group-8wdk started at 2022-11-26 02:57:44 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container kube-proxy ready: false, restart count 5 Nov 26 03:11:46.133: INFO: netserver-1 started at 2022-11-26 03:06:08 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container webserver ready: true, restart count 5 Nov 26 03:11:46.133: INFO: l7-default-backend-8549d69d99-kgcjh started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 03:11:46.133: INFO: volume-snapshot-controller-0 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container volume-snapshot-controller ready: true, restart count 6 Nov 26 03:11:46.133: INFO: inclusterclient started at 2022-11-26 03:04:10 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container inclusterclient ready: false, restart count 0 Nov 26 03:11:46.133: INFO: execpod-acceptrr72x started at 2022-11-26 03:10:23 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 03:11:46.133: INFO: pod-6f4844bc-1d15-4f79-abe4-3f079a630918 started at 2022-11-26 03:08:48 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.133: INFO: Container write-pod ready: false, restart count 0 Nov 26 03:11:46.355: INFO: Latency metrics for node bootstrap-e2e-minion-group-8wdk Nov 26 03:11:46.355: INFO: Logging node info for node 
bootstrap-e2e-minion-group-h8k8 Nov 26 03:11:46.397: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-h8k8 d6f1c0c9-6047-409c-8381-ed8be6457456 9804 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-h8k8 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-h8k8 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-388":"bootstrap-e2e-minion-group-h8k8","csi-hostpath-provisioning-5570":"bootstrap-e2e-minion-group-h8k8"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:45 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {node-problem-detector Update v1 2022-11-26 03:07:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-26 03:09:10 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:11:33 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-h8k8,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:01 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:01 +0000 
UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:01 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:10:01 +0000 UTC,LastTransitionTime:2022-11-26 02:57:45 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.177.45,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5d45caccbe73d6049c0c7b580c44ebb3,SystemUUID:5d45cacc-be73-d604-9c0c-7b580c44ebb3,BootID:f77edcc1-2ac4-4561-a5dd-42b928bec3f2,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:11:46.398: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:11:46.442: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-h8k8 Nov 26 03:11:46.502: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:08:55 +0000 UTC (0+7 container statuses recorded) Nov 26 03:11:46.502: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 03:11:46.502: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 03:11:46.502: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 03:11:46.502: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 03:11:46.502: INFO: Container hostpath ready: true, restart count 0 Nov 26 03:11:46.502: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 03:11:46.502: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 03:11:46.502: INFO: konnectivity-agent-t5nvv started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.502: INFO: Container konnectivity-agent ready: false, restart count 5 Nov 26 03:11:46.502: INFO: back-off-cap started at 2022-11-26 02:59:37 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.502: INFO: Container back-off-cap ready: false, restart count 7 Nov 26 03:11:46.502: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:03:12 +0000 UTC (0+7 container statuses recorded) Nov 26 03:11:46.502: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 03:11:46.502: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 03:11:46.502: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 
03:11:46.502: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 03:11:46.502: INFO: Container hostpath ready: false, restart count 5 Nov 26 03:11:46.502: INFO: Container liveness-probe ready: false, restart count 5 Nov 26 03:11:46.502: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 03:11:46.502: INFO: netserver-2 started at 2022-11-26 03:06:20 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.502: INFO: Container webserver ready: false, restart count 3 Nov 26 03:11:46.502: INFO: csi-mockplugin-0 started at 2022-11-26 03:03:22 +0000 UTC (0+4 container statuses recorded) Nov 26 03:11:46.502: INFO: Container busybox ready: false, restart count 4 Nov 26 03:11:46.502: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 03:11:46.502: INFO: Container driver-registrar ready: false, restart count 4 Nov 26 03:11:46.502: INFO: Container mock ready: false, restart count 4 Nov 26 03:11:46.502: INFO: csi-mockplugin-0 started at 2022-11-26 03:02:45 +0000 UTC (0+4 container statuses recorded) Nov 26 03:11:46.502: INFO: Container busybox ready: false, restart count 3 Nov 26 03:11:46.502: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 03:11:46.502: INFO: Container driver-registrar ready: true, restart count 5 Nov 26 03:11:46.502: INFO: Container mock ready: true, restart count 5 Nov 26 03:11:46.502: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:08:47 +0000 UTC (0+7 container statuses recorded) Nov 26 03:11:46.502: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 03:11:46.502: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 03:11:46.502: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 03:11:46.502: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 03:11:46.502: INFO: Container hostpath ready: true, restart count 1 Nov 26 03:11:46.502: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 03:11:46.502: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 03:11:46.502: INFO: hostexec-bootstrap-e2e-minion-group-h8k8-q7bst started at 2022-11-26 03:10:20 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.502: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 03:11:46.502: INFO: kube-proxy-bootstrap-e2e-minion-group-h8k8 started at 2022-11-26 02:57:44 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.502: INFO: Container kube-proxy ready: true, restart count 6 Nov 26 03:11:46.502: INFO: metadata-proxy-v0.1-9vqbj started at 2022-11-26 02:57:45 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:46.502: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:11:46.502: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:11:46.502: INFO: coredns-6d97d5ddb-x27zq started at 2022-11-26 02:58:05 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.502: INFO: Container coredns ready: false, restart count 6 Nov 26 03:11:46.502: INFO: netserver-2 started at 2022-11-26 03:06:08 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:46.502: INFO: Container webserver ready: false, restart count 3 Nov 26 03:11:46.502: INFO: pod-subpath-test-preprovisionedpv-2cvw started at 2022-11-26 03:10:32 +0000 UTC (1+2 container statuses recorded) Nov 26 03:11:46.502: INFO: Init container init-volume-preprovisionedpv-2cvw ready: true, restart count 0 Nov 26 03:11:46.502: INFO: Container test-container-subpath-preprovisionedpv-2cvw ready: true, 
restart count 2 Nov 26 03:11:46.503: INFO: Container test-container-volume-preprovisionedpv-2cvw ready: true, restart count 2 Nov 26 03:11:46.770: INFO: Latency metrics for node bootstrap-e2e-minion-group-h8k8 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "esipp-3996" for this suite. 11/26/22 03:11:46.77
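For context on how a dump like the one above is produced: the "Logging pods the kubelet thinks is on node ..." section is, in essence, a pod list filtered by node name. Below is a minimal client-go sketch of that query; it is an illustration only (kubeconfig path copied from the log, client construction and error handling simplified), not the e2e framework's own dump code.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path as it appears in the log above; adjust for your environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// List every pod scheduled on the node, across all namespaces,
	// and print per-container readiness and restart counts.
	nodeName := "bootstrap-e2e-minion-group-h8k8"
	pods, err := client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
		FieldSelector: "spec.nodeName=" + nodeName,
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		for _, st := range p.Status.ContainerStatuses {
			fmt.Printf("%s/%s container %s ready=%v restarts=%d\n",
				p.Namespace, p.Name, st.Name, st.Ready, st.RestartCount)
		}
	}
}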
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\sonly\starget\snodes\swith\sendpoints$'
test/e2e/framework/network/utils.go:834 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc0011ae000, 0x3c?) test/e2e/framework/network/utils.go:834 +0x545 k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000d1e000, {0x0, 0x0, 0xc0012b6ae0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.5() test/e2e/network/loadbalancer.go:1382 +0x445
from junit_01.xml
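The trace above ends in NetworkingTestConfig.setup, which fails because the node-port-service Endpoints never report the three expected addresses (see the repeated "Waiting for amount of service:node-port-service endpoints to be 3" lines in the detailed log below). As a rough, hedged sketch of that kind of wait, assuming a client-go kubernetes.Interface and arbitrary interval/timeout values rather than the framework's actual implementation, one could write:

package endpointwait

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForEndpointCount polls the named service's Endpoints object until it
// exposes the expected number of addresses, or the timeout expires. This is a
// simplified stand-in for what the e2e networking setup waits on, not the
// framework's code.
func waitForEndpointCount(client kubernetes.Interface, ns, svc string, want int) error {
	return wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
		ep, err := client.CoreV1().Endpoints(ns).Get(context.TODO(), svc, metav1.GetOptions{})
		if err != nil {
			return false, nil // treat API errors as "not ready yet" and keep polling
		}
		got := 0
		for _, s := range ep.Subsets {
			got += len(s.Addresses)
		}
		return got == want, nil
	})
}

Hypothetical usage, matching the names in this failure: err := waitForEndpointCount(client, "esipp-7542", "node-port-service", 3).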
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 03:25:31.097 Nov 26 03:25:31.097: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 03:25:31.099 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 03:25:31.388 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 03:25:31.482 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1250 [It] should only target nodes with endpoints test/e2e/network/loadbalancer.go:1346 STEP: creating a service esipp-7542/external-local-nodes with type=LoadBalancer 11/26/22 03:25:31.809 STEP: setting ExternalTrafficPolicy=Local 11/26/22 03:25:31.809 STEP: waiting for loadbalancer for service esipp-7542/external-local-nodes 11/26/22 03:25:31.965 Nov 26 03:25:31.965: INFO: Waiting up to 15m0s for service "external-local-nodes" to have a LoadBalancer STEP: waiting for loadbalancer for service esipp-7542/external-local-nodes 11/26/22 03:27:50.122 Nov 26 03:27:50.122: INFO: Waiting up to 15m0s for service "external-local-nodes" to have a LoadBalancer STEP: Performing setup for networking test in namespace esipp-7542 11/26/22 03:27:50.187 STEP: creating a selector 11/26/22 03:27:50.187 STEP: Creating the service pods in kubernetes 11/26/22 03:27:50.187 Nov 26 03:27:50.187: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable Nov 26 03:27:50.610: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "esipp-7542" to be "running and ready" Nov 26 03:27:50.677: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 67.423355ms Nov 26 03:27:50.677: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 26 03:27:52.748: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 2.138128396s Nov 26 03:27:52.748: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 26 03:27:54.744: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.13433317s Nov 26 03:27:54.744: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:27:56.726: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.11612519s Nov 26 03:27:56.726: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:27:58.769: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.159414018s Nov 26 03:27:58.769: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:28:00.747: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.136991192s Nov 26 03:28:00.747: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:28:02.736: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.126209136s Nov 26 03:28:02.736: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:28:04.853: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.2428518s Nov 26 03:28:04.853: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:28:06.732: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 16.121771846s Nov 26 03:28:06.732: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:28:08.744: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.134392281s Nov 26 03:28:08.744: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:28:10.772: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.162644162s Nov 26 03:28:10.773: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:28:12.734: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. Elapsed: 22.123925164s Nov 26 03:28:12.734: INFO: The phase of Pod netserver-0 is Running (Ready = true) Nov 26 03:28:12.734: INFO: Pod "netserver-0" satisfied condition "running and ready" Nov 26 03:28:12.784: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "esipp-7542" to be "running and ready" Nov 26 03:28:12.842: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 58.033623ms Nov 26 03:28:12.842: INFO: The phase of Pod netserver-1 is Running (Ready = true) Nov 26 03:28:12.842: INFO: Pod "netserver-1" satisfied condition "running and ready" Nov 26 03:28:12.901: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "esipp-7542" to be "running and ready" Nov 26 03:28:12.959: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 57.945792ms Nov 26 03:28:12.959: INFO: The phase of Pod netserver-2 is Running (Ready = true) Nov 26 03:28:12.959: INFO: Pod "netserver-2" satisfied condition "running and ready" STEP: Creating test pods 11/26/22 03:28:13.012 Nov 26 03:28:13.091: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "esipp-7542" to be "running" Nov 26 03:28:13.177: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 86.0085ms Nov 26 03:28:15.292: INFO: Pod "test-container-pod": Phase="Running", Reason="", readiness=true. Elapsed: 2.201566522s Nov 26 03:28:15.292: INFO: Pod "test-container-pod" satisfied condition "running" Nov 26 03:28:15.545: INFO: Setting MaxTries for pod polling to 39 for networking test based on endpoint count 3 STEP: Getting node addresses 11/26/22 03:28:15.545 Nov 26 03:28:15.545: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable STEP: Creating the service on top of the pods in kubernetes 11/26/22 03:28:15.655 Nov 26 03:28:15.835: INFO: Service node-port-service in namespace esipp-7542 found. Nov 26 03:28:16.126: INFO: Service session-affinity-service in namespace esipp-7542 found. 
STEP: Waiting for NodePort service to expose endpoint 11/26/22 03:28:16.18 Nov 26 03:28:17.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:18.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:19.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:20.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:21.180: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:22.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:23.180: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:24.180: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:25.180: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:26.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:27.180: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:28.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:29.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:30.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:31.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:32.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:33.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:34.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:35.180: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:36.180: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:37.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:38.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:39.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:40.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:41.180: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:42.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:43.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:44.180: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:45.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:46.181: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:46.247: INFO: Waiting for amount of service:node-port-service endpoints to be 3 Nov 26 03:28:46.320: INFO: Unexpected error: failed to validate endpoints for service node-port-service in namespace: esipp-7542: <*errors.errorString | 0xc0001c9a00>: { s: "timed out waiting for the condition", } Nov 26 03:28:46.320: FAIL: failed to validate endpoints for service node-port-service in namespace: esipp-7542: timed out waiting for the condition Full Stack Trace k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc0011ae000, 0x3c?) 
test/e2e/framework/network/utils.go:834 +0x545 k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000d1e000, {0x0, 0x0, 0xc0012b6ae0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.5() test/e2e/network/loadbalancer.go:1382 +0x445 Nov 26 03:28:46.670: INFO: Waiting up to 15m0s for service "external-local-nodes" to have no LoadBalancer [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 03:28:56.958: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 Nov 26 03:28:57.010: INFO: Output of kubectl describe svc: Nov 26 03:28:57.010: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-7542 describe svc --namespace=esipp-7542' Nov 26 03:28:57.952: INFO: stderr: "" Nov 26 03:28:57.952: INFO: stdout: "Name: external-local-nodes\nNamespace: esipp-7542\nLabels: testid=external-local-nodes-f4291801-a8e9-4861-a5c5-eb33bd7e4797\nAnnotations: <none>\nSelector: testid=external-local-nodes-f4291801-a8e9-4861-a5c5-eb33bd7e4797\nType: ClusterIP\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.141.8\nIPs: 10.0.141.8\nPort: <unset> 8081/TCP\nTargetPort: 80/TCP\nEndpoints: <none>\nSession Affinity: None\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal EnsuringLoadBalancer 104s service-controller Ensuring load balancer\n Normal EnsuredLoadBalancer 68s service-controller Ensured load balancer\n Normal Type 11s service-controller LoadBalancer -> ClusterIP\n\n\nName: node-port-service\nNamespace: esipp-7542\nLabels: <none>\nAnnotations: <none>\nSelector: selector-5cf32dd5-f9a2-4574-b997-2dee4480f3aa=true\nType: NodePort\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.221.241\nIPs: 10.0.221.241\nPort: http 80/TCP\nTargetPort: 8083/TCP\nNodePort: http 31784/TCP\nEndpoints: 10.64.0.245:8083,10.64.2.234:8083,10.64.3.34:8083\nPort: udp 90/UDP\nTargetPort: 8081/UDP\nNodePort: udp 31823/UDP\nEndpoints: 10.64.0.245:8081,10.64.2.234:8081,10.64.3.34:8081\nSession Affinity: None\nExternal Traffic Policy: Cluster\nEvents: <none>\n\n\nName: session-affinity-service\nNamespace: esipp-7542\nLabels: <none>\nAnnotations: <none>\nSelector: selector-5cf32dd5-f9a2-4574-b997-2dee4480f3aa=true\nType: NodePort\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.0.116.228\nIPs: 10.0.116.228\nPort: http 80/TCP\nTargetPort: 8083/TCP\nNodePort: http 32498/TCP\nEndpoints: 10.64.0.245:8083,10.64.2.234:8083,10.64.3.34:8083\nPort: udp 90/UDP\nTargetPort: 8081/UDP\nNodePort: udp 32640/UDP\nEndpoints: 10.64.0.245:8081,10.64.2.234:8081,10.64.3.34:8081\nSession Affinity: ClientIP\nExternal Traffic Policy: Cluster\nEvents: <none>\n" Nov 26 03:28:57.952: INFO: Name: external-local-nodes Namespace: esipp-7542 Labels: testid=external-local-nodes-f4291801-a8e9-4861-a5c5-eb33bd7e4797 Annotations: <none> Selector: testid=external-local-nodes-f4291801-a8e9-4861-a5c5-eb33bd7e4797 Type: ClusterIP IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.141.8 IPs: 10.0.141.8 Port: <unset> 8081/TCP TargetPort: 80/TCP Endpoints: <none> Session Affinity: None Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal EnsuringLoadBalancer 104s service-controller Ensuring load balancer Normal EnsuredLoadBalancer 68s service-controller Ensured 
load balancer Normal Type 11s service-controller LoadBalancer -> ClusterIP Name: node-port-service Namespace: esipp-7542 Labels: <none> Annotations: <none> Selector: selector-5cf32dd5-f9a2-4574-b997-2dee4480f3aa=true Type: NodePort IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.221.241 IPs: 10.0.221.241 Port: http 80/TCP TargetPort: 8083/TCP NodePort: http 31784/TCP Endpoints: 10.64.0.245:8083,10.64.2.234:8083,10.64.3.34:8083 Port: udp 90/UDP TargetPort: 8081/UDP NodePort: udp 31823/UDP Endpoints: 10.64.0.245:8081,10.64.2.234:8081,10.64.3.34:8081 Session Affinity: None External Traffic Policy: Cluster Events: <none> Name: session-affinity-service Namespace: esipp-7542 Labels: <none> Annotations: <none> Selector: selector-5cf32dd5-f9a2-4574-b997-2dee4480f3aa=true Type: NodePort IP Family Policy: SingleStack IP Families: IPv4 IP: 10.0.116.228 IPs: 10.0.116.228 Port: http 80/TCP TargetPort: 8083/TCP NodePort: http 32498/TCP Endpoints: 10.64.0.245:8083,10.64.2.234:8083,10.64.3.34:8083 Port: udp 90/UDP TargetPort: 8081/UDP NodePort: udp 32640/UDP Endpoints: 10.64.0.245:8081,10.64.2.234:8081,10.64.3.34:8081 Session Affinity: ClientIP External Traffic Policy: Cluster Events: <none> [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 03:28:57.952 STEP: Collecting events from namespace "esipp-7542". 11/26/22 03:28:57.952 STEP: Found 28 events. 11/26/22 03:28:58.025 Nov 26 03:28:58.025: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for netserver-0: { } Scheduled: Successfully assigned esipp-7542/netserver-0 to bootstrap-e2e-minion-group-7rps Nov 26 03:28:58.025: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for netserver-1: { } Scheduled: Successfully assigned esipp-7542/netserver-1 to bootstrap-e2e-minion-group-8wdk Nov 26 03:28:58.025: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for netserver-2: { } Scheduled: Successfully assigned esipp-7542/netserver-2 to bootstrap-e2e-minion-group-h8k8 Nov 26 03:28:58.025: INFO: At 0001-01-01 00:00:00 +0000 UTC - event for test-container-pod: { } Scheduled: Successfully assigned esipp-7542/test-container-pod to bootstrap-e2e-minion-group-7rps Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:13 +0000 UTC - event for external-local-nodes: {service-controller } EnsuringLoadBalancer: Ensuring load balancer Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:49 +0000 UTC - event for external-local-nodes: {service-controller } EnsuredLoadBalancer: Ensured load balancer Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:51 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} FailedMount: MountVolume.SetUp failed for volume "kube-api-access-9rd45" : failed to sync configmap cache: timed out waiting for the condition Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:51 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} FailedMount: MountVolume.SetUp failed for volume "kube-api-access-qtrmn" : failed to sync configmap cache: timed out waiting for the condition Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:52 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Created: Created container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:52 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Started: Started container webserver Nov 26 03:28:58.025: INFO: At 
2022-11-26 03:27:52 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:52 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Started: Started container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:52 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:52 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Created: Created container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:52 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Killing: Stopping container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:53 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Started: Started container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:53 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Created: Created container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:53 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 03:28:58.025: INFO: At 2022-11-26 03:27:53 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Nov 26 03:28:58.025: INFO: At 2022-11-26 03:28:12 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Nov 26 03:28:58.025: INFO: At 2022-11-26 03:28:12 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Killing: Stopping container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:28:13 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-7rps} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 03:28:58.025: INFO: At 2022-11-26 03:28:13 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-7rps} Created: Created container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:28:14 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-7rps} Started: Started container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:28:15 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-7rps} Killing: Stopping container webserver Nov 26 03:28:58.025: INFO: At 2022-11-26 03:28:16 +0000 UTC - event for test-container-pod: {kubelet bootstrap-e2e-minion-group-7rps} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
Nov 26 03:28:58.025: INFO: At 2022-11-26 03:28:17 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} BackOff: Back-off restarting failed container webserver in pod netserver-0_esipp-7542(5a151328-a5f9-45e5-8047-7f6ed15baee9) Nov 26 03:28:58.025: INFO: At 2022-11-26 03:28:46 +0000 UTC - event for external-local-nodes: {service-controller } Type: LoadBalancer -> ClusterIP Nov 26 03:28:58.124: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 03:28:58.124: INFO: netserver-0 bootstrap-e2e-minion-group-7rps Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:27:50 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:52 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:52 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:27:50 +0000 UTC }] Nov 26 03:28:58.124: INFO: netserver-1 bootstrap-e2e-minion-group-8wdk Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:27:50 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:02 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:02 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:27:50 +0000 UTC }] Nov 26 03:28:58.124: INFO: netserver-2 bootstrap-e2e-minion-group-h8k8 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:27:50 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:11 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:11 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:27:50 +0000 UTC }] Nov 26 03:28:58.124: INFO: test-container-pod bootstrap-e2e-minion-group-7rps Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:13 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:17 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:17 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:28:13 +0000 UTC }] Nov 26 03:28:58.125: INFO: Nov 26 03:28:58.948: INFO: Logging node info for node bootstrap-e2e-master Nov 26 03:28:59.019: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master 6434c658-db87-4566-9960-c594435d7ea0 18244 0 2022-11-26 02:57:42 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:28:42 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:28:42 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:28:42 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:28:42 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID 
available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:28:42 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.169.190,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5736e6f149167618f71cd530dafef4cc,SystemUUID:5736e6f1-4916-7618-f71c-d530dafef4cc,BootID:aec7342f-3939-425a-bcb1-13b86fb32845,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:28:59.020: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 03:28:59.081: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 03:28:59.262: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.262: INFO: Container kube-apiserver ready: true, restart count 0 Nov 26 03:28:59.262: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.262: INFO: Container etcd-container ready: true, restart count 2 Nov 26 03:28:59.262: INFO: kube-addon-manager-bootstrap-e2e-master 
started at 2022-11-26 02:57:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.262: INFO: Container kube-addon-manager ready: true, restart count 1 Nov 26 03:28:59.262: INFO: metadata-proxy-v0.1-zcw5j started at 2022-11-26 02:57:48 +0000 UTC (0+2 container statuses recorded) Nov 26 03:28:59.262: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:28:59.262: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:28:59.262: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.262: INFO: Container konnectivity-server-container ready: true, restart count 6 Nov 26 03:28:59.262: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.262: INFO: Container kube-controller-manager ready: true, restart count 6 Nov 26 03:28:59.262: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.262: INFO: Container etcd-container ready: true, restart count 2 Nov 26 03:28:59.262: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.262: INFO: Container kube-scheduler ready: false, restart count 3 Nov 26 03:28:59.262: INFO: l7-lb-controller-bootstrap-e2e-master started at 2022-11-26 02:57:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.262: INFO: Container l7-lb-controller ready: true, restart count 8 Nov 26 03:28:59.627: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 03:28:59.627: INFO: Logging node info for node bootstrap-e2e-minion-group-7rps Nov 26 03:28:59.704: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-7rps d3085899-95ef-4dfa-ae30-feaa3b8cf547 17954 0 2022-11-26 02:57:40 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-7rps kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-7rps topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-859":"bootstrap-e2e-minion-group-7rps"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } 
{kube-controller-manager Update v1 2022-11-26 03:22:10 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:27:33 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status} {node-problem-detector Update v1 2022-11-26 03:27:47 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-7rps,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:27:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:27:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:27:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 
+0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:27:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:27:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:27:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:27:47 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:27:18 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:27:18 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:27:18 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:27:18 +0000 UTC,LastTransitionTime:2022-11-26 02:57:42 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.145.67.56,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:25dabe18cf1d9ba1fb46b48496913a34,SystemUUID:25dabe18-cf1d-9ba1-fb46-b48496913a34,BootID:cc4a1118-0d4a-44da-b481-07a484a9d681,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 
registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:28:59.705: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-7rps Nov 26 03:28:59.778: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-7rps Nov 26 03:28:59.890: INFO: netserver-0 started at 2022-11-26 03:27:50 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container webserver ready: true, restart count 2 Nov 26 03:28:59.891: INFO: lb-sourcerange-6wkl4 started at 2022-11-26 03:10:28 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container netexec ready: false, restart count 6 Nov 26 03:28:59.891: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:20:58 +0000 UTC (0+7 container statuses recorded) Nov 26 03:28:59.891: INFO: Container csi-attacher ready: true, restart count 4 Nov 26 03:28:59.891: INFO: Container csi-provisioner ready: true, restart count 4 Nov 26 03:28:59.891: INFO: Container csi-resizer ready: true, restart count 4 Nov 26 03:28:59.891: INFO: Container csi-snapshotter ready: true, restart count 4 Nov 26 03:28:59.891: INFO: Container hostpath ready: true, restart count 4 Nov 26 03:28:59.891: INFO: Container liveness-probe ready: true, restart count 4 Nov 26 03:28:59.891: INFO: Container node-driver-registrar ready: true, restart count 4 Nov 26 03:28:59.891: INFO: metadata-proxy-v0.1-swmbn started at 2022-11-26 02:57:41 +0000 UTC (0+2 container statuses recorded) Nov 26 03:28:59.891: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:28:59.891: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:28:59.891: INFO: lb-internal-tc6qt started at 2022-11-26 03:25:21 +0000 UTC 
(0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container netexec ready: true, restart count 1 Nov 26 03:28:59.891: INFO: test-container-pod started at 2022-11-26 03:28:13 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container webserver ready: true, restart count 1 Nov 26 03:28:59.891: INFO: ilb-host-exec started at 2022-11-26 03:27:15 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container agnhost-container ready: false, restart count 1 Nov 26 03:28:59.891: INFO: kube-proxy-bootstrap-e2e-minion-group-7rps started at 2022-11-26 02:57:40 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container kube-proxy ready: false, restart count 8 Nov 26 03:28:59.891: INFO: csi-mockplugin-0 started at 2022-11-26 03:21:45 +0000 UTC (0+4 container statuses recorded) Nov 26 03:28:59.891: INFO: Container busybox ready: true, restart count 5 Nov 26 03:28:59.891: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 03:28:59.891: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 03:28:59.891: INFO: Container mock ready: false, restart count 5 Nov 26 03:28:59.891: INFO: external-provisioner-74p6c started at 2022-11-26 03:27:01 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container nfs-provisioner ready: true, restart count 0 Nov 26 03:28:59.891: INFO: konnectivity-agent-q9wlj started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container konnectivity-agent ready: false, restart count 8 Nov 26 03:28:59.891: INFO: pod-d160ca42-832c-4ddf-b18b-b64476b424a2 started at 2022-11-26 03:08:50 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container write-pod ready: false, restart count 0 Nov 26 03:28:59.891: INFO: ss-0 started at 2022-11-26 03:19:46 +0000 UTC (0+1 container statuses recorded) Nov 26 03:28:59.891: INFO: Container webserver ready: false, restart count 6 Nov 26 03:28:59.891: INFO: metrics-server-v0.5.2-867b8754b9-zrdj7 started at 2022-11-26 02:58:13 +0000 UTC (0+2 container statuses recorded) Nov 26 03:28:59.891: INFO: Container metrics-server ready: false, restart count 9 Nov 26 03:28:59.891: INFO: Container metrics-server-nanny ready: false, restart count 10 Nov 26 03:29:00.327: INFO: Latency metrics for node bootstrap-e2e-minion-group-7rps Nov 26 03:29:00.327: INFO: Logging node info for node bootstrap-e2e-minion-group-8wdk Nov 26 03:29:00.417: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-8wdk 99654f41-ed96-4b06-a6c8-1db40d65e751 18226 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-8wdk kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-8wdk topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-7017":"bootstrap-e2e-minion-group-8wdk","csi-hostpath-provisioning-5215":"bootstrap-e2e-minion-group-8wdk","csi-hostpath-provisioning-7066":"bootstrap-e2e-minion-group-8wdk"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update 
v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 03:27:42 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 03:27:51 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 03:28:38 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-8wdk,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 
DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:27:52 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:27:52 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:27:52 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:27:52 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.227.133,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7f88ac496457f212a2a8dc4997301551,SystemUUID:7f88ac49-6457-f212-a2a8-dc4997301551,BootID:691b96ec-62db-4c87-94b3-0915996e979c,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/volume/nfs@sha256:3bda73f2428522b0e342af80a0b9679e8594c2126f2b3cca39ed787589741b9e registry.k8s.io/e2e-test-images/volume/nfs:1.3],SizeBytes:95836203,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 
registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:29:00.418: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-8wdk Nov 26 03:29:00.496: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-8wdk Nov 26 03:29:00.639: INFO: netserver-1 started at 2022-11-26 03:27:50 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container webserver ready: true, restart count 0 Nov 26 03:29:00.639: INFO: pvc-volume-tester-j7n8g started at 2022-11-26 03:16:40 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container 
volume-tester ready: false, restart count 0 Nov 26 03:29:00.639: INFO: kube-proxy-bootstrap-e2e-minion-group-8wdk started at 2022-11-26 02:57:44 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container kube-proxy ready: false, restart count 8 Nov 26 03:29:00.639: INFO: l7-default-backend-8549d69d99-kgcjh started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 03:29:00.639: INFO: volume-snapshot-controller-0 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container volume-snapshot-controller ready: false, restart count 8 Nov 26 03:29:00.639: INFO: execpod-acceptrr72x started at 2022-11-26 03:10:23 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container agnhost-container ready: true, restart count 6 Nov 26 03:29:00.639: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:19:18 +0000 UTC (0+7 container statuses recorded) Nov 26 03:29:00.639: INFO: Container csi-attacher ready: false, restart count 6 Nov 26 03:29:00.639: INFO: Container csi-provisioner ready: false, restart count 6 Nov 26 03:29:00.639: INFO: Container csi-resizer ready: false, restart count 6 Nov 26 03:29:00.639: INFO: Container csi-snapshotter ready: false, restart count 6 Nov 26 03:29:00.639: INFO: Container hostpath ready: false, restart count 6 Nov 26 03:29:00.639: INFO: Container liveness-probe ready: false, restart count 6 Nov 26 03:29:00.639: INFO: Container node-driver-registrar ready: false, restart count 6 Nov 26 03:29:00.639: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:27:35 +0000 UTC (0+7 container statuses recorded) Nov 26 03:29:00.639: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container hostpath ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 03:29:00.639: INFO: metadata-proxy-v0.1-fzfwr started at 2022-11-26 02:57:45 +0000 UTC (0+2 container statuses recorded) Nov 26 03:29:00.639: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:29:00.639: INFO: execpod-dropjvg4x started at 2022-11-26 03:10:25 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container agnhost-container ready: false, restart count 6 Nov 26 03:29:00.639: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:27:01 +0000 UTC (0+7 container statuses recorded) Nov 26 03:29:00.639: INFO: Container csi-attacher ready: true, restart count 2 Nov 26 03:29:00.639: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 03:29:00.639: INFO: Container csi-resizer ready: true, restart count 2 Nov 26 03:29:00.639: INFO: Container csi-snapshotter ready: true, restart count 2 Nov 26 03:29:00.639: INFO: Container hostpath ready: true, restart count 2 Nov 26 03:29:00.639: INFO: Container liveness-probe ready: true, restart count 2 Nov 26 03:29:00.639: INFO: Container node-driver-registrar ready: true, restart count 2 Nov 26 03:29:00.639: INFO: 
volume-prep-provisioning-2914 started at 2022-11-26 03:19:23 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container init-volume-provisioning-2914 ready: false, restart count 0 Nov 26 03:29:00.639: INFO: pod-cb1f5910-f737-44a4-bc9d-1e9f85571f6f started at 2022-11-26 03:21:45 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container write-pod ready: false, restart count 0 Nov 26 03:29:00.639: INFO: hostexec-bootstrap-e2e-minion-group-8wdk-jrkth started at 2022-11-26 03:27:51 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 03:29:00.639: INFO: ss-2 started at 2022-11-26 03:25:40 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container webserver ready: false, restart count 4 Nov 26 03:29:00.639: INFO: kube-dns-autoscaler-5f6455f985-54xp5 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container autoscaler ready: false, restart count 8 Nov 26 03:29:00.639: INFO: coredns-6d97d5ddb-6l6bj started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container coredns ready: false, restart count 8 Nov 26 03:29:00.639: INFO: konnectivity-agent-v4t28 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:00.639: INFO: Container konnectivity-agent ready: true, restart count 9 Nov 26 03:29:00.639: INFO: csi-mockplugin-0 started at 2022-11-26 03:13:53 +0000 UTC (0+4 container statuses recorded) Nov 26 03:29:00.639: INFO: Container busybox ready: false, restart count 5 Nov 26 03:29:00.639: INFO: Container csi-provisioner ready: true, restart count 6 Nov 26 03:29:00.639: INFO: Container driver-registrar ready: false, restart count 5 Nov 26 03:29:00.639: INFO: Container mock ready: false, restart count 5 Nov 26 03:29:00.639: INFO: pod-subpath-test-preprovisionedpv-79j2 started at 2022-11-26 03:28:01 +0000 UTC (1+2 container statuses recorded) Nov 26 03:29:00.639: INFO: Init container init-volume-preprovisionedpv-79j2 ready: true, restart count 1 Nov 26 03:29:00.639: INFO: Container test-container-subpath-preprovisionedpv-79j2 ready: true, restart count 1 Nov 26 03:29:00.639: INFO: Container test-container-volume-preprovisionedpv-79j2 ready: true, restart count 1 Nov 26 03:29:00.639: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:27:19 +0000 UTC (0+7 container statuses recorded) Nov 26 03:29:00.639: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container hostpath ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 03:29:00.639: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 03:29:01.060: INFO: Latency metrics for node bootstrap-e2e-minion-group-8wdk Nov 26 03:29:01.060: INFO: Logging node info for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:29:01.120: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-h8k8 d6f1c0c9-6047-409c-8381-ed8be6457456 17986 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true 
failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-h8k8 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-h8k8 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-provisioning-1575":"bootstrap-e2e-minion-group-h8k8","csi-hostpath-provisioning-30":"bootstrap-e2e-minion-group-h8k8"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:45 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-11-26 03:27:34 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:27:34 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status} {node-problem-detector Update v1 2022-11-26 03:27:51 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} 
status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-h8k8,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:27:51 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:27:34 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:27:34 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:27:34 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:27:34 +0000 UTC,LastTransitionTime:2022-11-26 02:57:45 +0000 
UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.177.45,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5d45caccbe73d6049c0c7b580c44ebb3,SystemUUID:5d45cacc-be73-d604-9c0c-7b580c44ebb3,BootID:f77edcc1-2ac4-4561-a5dd-42b928bec3f2,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:29:01.120: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:29:01.226: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-h8k8 Nov 26 03:29:01.409: INFO: csi-mockplugin-0 started at 2022-11-26 03:26:52 +0000 UTC (0+4 container statuses recorded) Nov 26 03:29:01.409: INFO: Container busybox ready: true, restart count 2 Nov 26 03:29:01.409: INFO: Container csi-provisioner ready: false, restart count 2 Nov 26 03:29:01.409: INFO: Container driver-registrar ready: false, restart count 2 Nov 26 03:29:01.409: INFO: Container mock ready: false, restart count 2 Nov 26 03:29:01.409: INFO: pod-902789dd-4230-4d2b-bb8c-860afaa1aeb6 started at 2022-11-26 03:27:01 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container write-pod ready: false, restart count 0 Nov 26 03:29:01.409: INFO: hostexec-bootstrap-e2e-minion-group-h8k8-5j2gh started at 2022-11-26 03:26:44 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 03:29:01.409: INFO: hostexec-bootstrap-e2e-minion-group-h8k8-6k8x5 started at 2022-11-26 03:25:20 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container agnhost-container ready: true, restart count 2 Nov 26 03:29:01.409: INFO: konnectivity-agent-t5nvv started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container konnectivity-agent ready: false, restart count 8 Nov 26 03:29:01.409: INFO: mutability-test-c2l54 started at 2022-11-26 03:25:46 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container netexec ready: true, restart count 0 Nov 26 03:29:01.409: INFO: ss-1 started at 2022-11-26 03:25:40 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container webserver ready: true, restart count 1 Nov 26 03:29:01.409: INFO: external-local-pods-td9g8 started at 2022-11-26 03:21:45 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container 
netexec ready: true, restart count 3 Nov 26 03:29:01.409: INFO: metadata-proxy-v0.1-9vqbj started at 2022-11-26 02:57:45 +0000 UTC (0+2 container statuses recorded) Nov 26 03:29:01.409: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:29:01.409: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:29:01.409: INFO: coredns-6d97d5ddb-x27zq started at 2022-11-26 02:58:05 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container coredns ready: false, restart count 9 Nov 26 03:29:01.409: INFO: netserver-2 started at 2022-11-26 03:27:50 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container webserver ready: true, restart count 1 Nov 26 03:29:01.409: INFO: pause-pod-deployment-648855d779-zz4dx started at 2022-11-26 03:21:48 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container agnhost-pause ready: true, restart count 1 Nov 26 03:29:01.409: INFO: csi-mockplugin-0 started at 2022-11-26 03:16:56 +0000 UTC (0+3 container statuses recorded) Nov 26 03:29:01.409: INFO: Container csi-provisioner ready: true, restart count 5 Nov 26 03:29:01.409: INFO: Container driver-registrar ready: true, restart count 5 Nov 26 03:29:01.409: INFO: Container mock ready: true, restart count 5 Nov 26 03:29:01.409: INFO: kube-proxy-bootstrap-e2e-minion-group-h8k8 started at 2022-11-26 02:57:44 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container kube-proxy ready: false, restart count 8 Nov 26 03:29:01.409: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:27:01 +0000 UTC (0+7 container statuses recorded) Nov 26 03:29:01.409: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 03:29:01.409: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 03:29:01.409: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 03:29:01.409: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 03:29:01.409: INFO: Container hostpath ready: true, restart count 0 Nov 26 03:29:01.409: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 03:29:01.409: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 03:29:01.409: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:27:14 +0000 UTC (0+7 container statuses recorded) Nov 26 03:29:01.409: INFO: Container csi-attacher ready: true, restart count 1 Nov 26 03:29:01.409: INFO: Container csi-provisioner ready: true, restart count 1 Nov 26 03:29:01.409: INFO: Container csi-resizer ready: true, restart count 1 Nov 26 03:29:01.409: INFO: Container csi-snapshotter ready: true, restart count 1 Nov 26 03:29:01.409: INFO: Container hostpath ready: true, restart count 1 Nov 26 03:29:01.409: INFO: Container liveness-probe ready: true, restart count 1 Nov 26 03:29:01.409: INFO: Container node-driver-registrar ready: true, restart count 1 Nov 26 03:29:01.409: INFO: csi-mockplugin-attacher-0 started at 2022-11-26 03:16:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:29:01.409: INFO: Container csi-attacher ready: false, restart count 4 Nov 26 03:29:01.931: INFO: Latency metrics for node bootstrap-e2e-minion-group-h8k8 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "esipp-7542" for this suite. 11/26/22 03:29:01.931
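The per-node dumps above ("Logging pods the kubelet thinks is on node ...", followed by one "Container ... ready ..., restart count ..." line per container) come from the framework listing every pod scheduled to a node and printing its container statuses. A minimal client-go sketch of the same listing is below; it is not the framework's code, and the kubeconfig path and node name are simply taken from this run's logs.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: kubeconfig at the path shown in this run's logs.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// List pods on one node across all namespaces, the way the node dump does.
	pods, err := cs.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{
		// Node name taken from the log above; substitute any node of interest.
		FieldSelector: "spec.nodeName=bootstrap-e2e-minion-group-8wdk",
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Printf("%s/%s started at %s\n", p.Namespace, p.Name, p.Status.StartTime)
		for _, c := range p.Status.ContainerStatuses {
			fmt.Printf("  Container %s ready: %v, restart count %d\n", c.Name, c.Ready, c.RestartCount)
		}
	}
}
```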
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\swork\sfor\stype\=LoadBalancer$'
test/e2e/framework/network/utils.go:727 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createTestPods(0xc000ea0380) test/e2e/framework/network/utils.go:727 +0x13e k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:766 +0x9f k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 +0x3e k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 +0x10a k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 +0x37f There were additional failures detected after the initial failure: [FAILED] Nov 26 03:37:00.593: failed to list events in namespace "esipp-1752": Get "https://34.168.169.190/api/v1/namespaces/esipp-1752/events": dial tcp 34.168.169.190:443: connect: connection refused In [DeferCleanup (Each)] at: test/e2e/framework/debug/dump.go:44 ---------- [FAILED] Nov 26 03:37:00.633: Couldn't delete ns: "esipp-1752": Delete "https://34.168.169.190/api/v1/namespaces/esipp-1752": dial tcp 34.168.169.190:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.169.190/api/v1/namespaces/esipp-1752", Err:(*net.OpError)(0xc002618b90)}) In [DeferCleanup (Each)] at: test/e2e/framework/framework.go:370from junit_01.xml
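The stack trace above bottoms out in the apimachinery wait helpers: createTestPods blocks in wait.PollImmediate, which re-runs a ConditionFunc until it reports done, returns a non-nil error, or the timeout lapses, which is why an apiserver refusing connections keeps resurfacing inside the poll. A minimal sketch of that polling pattern follows; the interval, timeout, and the stand-in condition are illustrative assumptions, not the framework's actual values or checks.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// PollImmediate runs the condition once right away, then every interval,
	// until it returns (true, nil), a non-nil error, or the timeout expires.
	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
		attempts++
		// Stand-in for a real check such as listing the test pods; here the
		// condition only "succeeds" on the third attempt.
		if attempts < 3 {
			fmt.Println("not ready yet, will retry")
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		// wait.ErrWaitTimeout (or the condition's own error) lands here.
		fmt.Println("poll failed:", err)
		return
	}
	fmt.Printf("condition satisfied after %d attempts\n", attempts)
}
```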
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 03:26:05.882 Nov 26 03:26:05.883: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 03:26:05.884 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 03:26:44.516 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 03:26:44.603 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1250 [It] should work for type=LoadBalancer test/e2e/network/loadbalancer.go:1266 STEP: creating a service esipp-1752/external-local-lb with type=LoadBalancer 11/26/22 03:26:48.214 STEP: setting ExternalTrafficPolicy=Local 11/26/22 03:26:48.214 STEP: waiting for loadbalancer for service esipp-1752/external-local-lb 11/26/22 03:26:48.27 Nov 26 03:26:48.270: INFO: Waiting up to 15m0s for service "external-local-lb" to have a LoadBalancer STEP: creating a pod to be part of the service external-local-lb 11/26/22 03:29:08.409 Nov 26 03:29:08.554: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 03:29:08.650: INFO: Found all 1 pods Nov 26 03:29:08.650: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [external-local-lb-lhlkz] Nov 26 03:29:08.650: INFO: Waiting up to 2m0s for pod "external-local-lb-lhlkz" in namespace "esipp-1752" to be "running and ready" Nov 26 03:29:08.868: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 217.831772ms Nov 26 03:29:08.868: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:10.919: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 2.268353158s Nov 26 03:29:10.919: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:12.941: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 4.291229689s Nov 26 03:29:12.941: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:14.935: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 6.284487795s Nov 26 03:29:14.935: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:16.916: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 8.265689425s Nov 26 03:29:16.916: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:19.030: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 10.379588071s Nov 26 03:29:19.030: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:20.951: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. 
Elapsed: 12.300395394s Nov 26 03:29:20.951: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:22.948: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 14.297498623s Nov 26 03:29:22.948: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:24.946: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 16.295358453s Nov 26 03:29:24.946: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:26.925: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 18.275132385s Nov 26 03:29:26.925: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:28.953: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 20.302358208s Nov 26 03:29:28.953: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:30.918: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 22.267855521s Nov 26 03:29:30.918: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:32.971: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 24.320566827s Nov 26 03:29:32.971: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:34.962: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 26.311861451s Nov 26 03:29:34.962: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:36.930: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 28.279571629s Nov 26 03:29:36.930: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on '' to be 'Running' but was 'Pending' Nov 26 03:29:39.048: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 30.397744109s Nov 26 03:29:39.048: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on 'bootstrap-e2e-minion-group-7rps' to be 'Running' but was 'Pending' Nov 26 03:29:40.938: INFO: Pod "external-local-lb-lhlkz": Phase="Pending", Reason="", readiness=false. Elapsed: 32.288222939s Nov 26 03:29:40.938: INFO: Error evaluating pod condition running and ready: want pod 'external-local-lb-lhlkz' on 'bootstrap-e2e-minion-group-7rps' to be 'Running' but was 'Pending' Nov 26 03:29:42.919: INFO: Pod "external-local-lb-lhlkz": Phase="Running", Reason="", readiness=false. 
Elapsed: 34.268818s Nov 26 03:29:42.919: INFO: Error evaluating pod condition running and ready: pod 'external-local-lb-lhlkz' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC }] Nov 26 03:29:44.940: INFO: Pod "external-local-lb-lhlkz": Phase="Running", Reason="", readiness=false. Elapsed: 36.289636596s Nov 26 03:29:44.940: INFO: Error evaluating pod condition running and ready: pod 'external-local-lb-lhlkz' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC }] Nov 26 03:29:46.958: INFO: Pod "external-local-lb-lhlkz": Phase="Running", Reason="", readiness=false. Elapsed: 38.307339803s Nov 26 03:29:46.958: INFO: Error evaluating pod condition running and ready: pod 'external-local-lb-lhlkz' on 'bootstrap-e2e-minion-group-7rps' didn't have condition {Ready True}; conditions: [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC ContainersNotReady containers with unready status: [netexec]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:29:38 +0000 UTC }] Nov 26 03:29:48.967: INFO: Pod "external-local-lb-lhlkz": Phase="Running", Reason="", readiness=true. Elapsed: 40.316894612s Nov 26 03:29:48.967: INFO: Pod "external-local-lb-lhlkz" satisfied condition "running and ready" Nov 26 03:29:48.967: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [external-local-lb-lhlkz] STEP: waiting for loadbalancer for service esipp-1752/external-local-lb 11/26/22 03:29:48.967 Nov 26 03:29:48.967: INFO: Waiting up to 15m0s for service "external-local-lb" to have a LoadBalancer STEP: reading clientIP using the TCP service's service port via its external VIP 11/26/22 03:29:49.038 Nov 26 03:29:49.038: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:29:49.078: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:29:51.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:29:52.126: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:29:53.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:29:53.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:29:55.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:29:55.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:29:57.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:29:57.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:29:59.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:29:59.117: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:01.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:01.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:03.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:03.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:05.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:15.080: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 03:30:17.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:17.119: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:19.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:19.119: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:21.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:21.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:23.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:23.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:25.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:25.118: INFO: 
Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:27.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:27.119: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:29.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:39.079: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 03:30:41.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:41.119: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:43.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:53.080: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 03:30:55.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:55.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:57.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:57.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:30:59.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:30:59.119: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:31:01.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:31:01.118: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": dial tcp 34.145.107.100:80: connect: connection refused Nov 26 03:31:03.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:31:13.078: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 03:31:15.079: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:31:25.080: INFO: Poke("http://34.145.107.100:80/clientip"): Get "http://34.145.107.100:80/clientip": context deadline exceeded (Client.Timeout exceeded while awaiting headers) Nov 26 03:31:27.078: INFO: Poking "http://34.145.107.100:80/clientip" Nov 26 03:31:27.158: INFO: Poke("http://34.145.107.100:80/clientip"): success Nov 26 03:31:27.158: INFO: ClientIP detected by target pod using VIP:SvcPort is 35.239.116.242:50344 STEP: checking if Source IP is preserved 11/26/22 03:31:27.158 Nov 26 03:31:27.493: INFO: Waiting up to 15m0s for service "external-local-lb" to have no LoadBalancer STEP: Performing setup for networking test in namespace esipp-1752 11/26/22 03:31:39.052 STEP: creating a selector 11/26/22 03:31:39.052 STEP: Creating the service pods in kubernetes 11/26/22 03:31:39.052 Nov 26 03:31:39.052: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable Nov 26 03:31:39.471: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "esipp-1752" to be "running and ready" Nov 26 03:31:39.541: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. 
Elapsed: 70.414121ms Nov 26 03:31:39.541: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 26 03:31:41.612: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.141120118s Nov 26 03:31:41.612: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:31:43.782: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.311040757s Nov 26 03:31:43.782: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:31:45.634: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.163746197s Nov 26 03:31:45.635: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:31:47.685: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.21468492s Nov 26 03:31:47.685: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 5m42.333s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 5m0.001s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 9.163s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc003241d10, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc002c69443, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc002c69443?, 0xc003547ea0?}, {0xc002a507e0?, 0xc000bf5808?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:31:49.649: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 10.177845273s Nov 26 03:31:49.649: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:31:51.678: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.207134865s Nov 26 03:31:51.678: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:31:53.629: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.158314161s Nov 26 03:31:53.629: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:31:55.616: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.145724823s Nov 26 03:31:55.616: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:31:57.660: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.189209578s Nov 26 03:31:57.660: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:31:59.697: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.226090139s Nov 26 03:31:59.697: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:01.610: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 22.139401025s Nov 26 03:32:01.610: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:03.702: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 24.231569209s Nov 26 03:32:03.702: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:05.645: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 26.174429996s Nov 26 03:32:05.645: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:07.615: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 28.143827975s Nov 26 03:32:07.615: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 6m2.336s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 5m20.005s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 29.167s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc003241d10, 0x2fdb16a?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc002c69443, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc002c69443?, 0xc003547ea0?}, {0xc002a507e0?, 0xc000bf5808?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:32:09.676: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 30.20560575s Nov 26 03:32:09.676: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:11.658: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 32.18772628s Nov 26 03:32:11.658: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:13.610: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 34.139183595s Nov 26 03:32:13.610: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:15.622: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 36.150960799s Nov 26 03:32:15.622: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:17.615: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 38.144080776s Nov 26 03:32:17.615: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:19.771: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 40.300444292s Nov 26 03:32:19.771: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:21.632: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 42.160840843s Nov 26 03:32:21.632: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:23.682: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 44.211311556s Nov 26 03:32:23.682: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:25.624: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 46.153666313s Nov 26 03:32:25.624: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:27.608: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 48.136937621s Nov 26 03:32:27.608: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 6m22.339s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 5m40.007s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 49.169s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc003241d10, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc002c69443, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc002c69443?, 0xc003547ea0?}, {0xc002a507e0?, 0xc000bf5808?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:32:29.671: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 50.199920336s Nov 26 03:32:29.671: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:31.633: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 52.16177262s Nov 26 03:32:31.633: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:33.627: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 54.155906383s Nov 26 03:32:33.627: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:35.620: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 56.149719753s Nov 26 03:32:35.620: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:37.635: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 58.164218432s Nov 26 03:32:37.635: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:39.655: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m0.184065453s Nov 26 03:32:39.655: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:41.606: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m2.134841417s Nov 26 03:32:41.606: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:43.682: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m4.211092278s Nov 26 03:32:43.682: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:45.808: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m6.337356564s Nov 26 03:32:45.808: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:47.628: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m8.157450145s Nov 26 03:32:47.628: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 6m42.341s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 6m0.01s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 1m9.172s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc003241d10, 0x2fdb16a?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc002c69443, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc002c69443?, 0xc003547ea0?}, {0xc002a507e0?, 0xc000bf5808?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:32:49.639: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m10.167921147s Nov 26 03:32:49.639: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:51.600: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m12.128950035s Nov 26 03:32:51.600: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:53.604: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m14.133707264s Nov 26 03:32:53.604: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:55.598: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m16.127239163s Nov 26 03:32:55.598: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:57.627: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m18.156624522s Nov 26 03:32:57.627: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:32:59.743: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m20.272206186s Nov 26 03:32:59.743: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:33:01.655: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=true. 
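Each of those netserver-* blocks is the framework polling the pod every couple of seconds until it is both Running and Ready; the goroutine dumps in the progress reports show the same thing from the inside (WaitForPodCondition driven by wait.PollImmediate). A minimal client-go approximation of that check, assuming a 2s poll interval and the 5m timeout quoted in the log (a sketch, not the framework's actual WaitTimeoutForPodReadyInNamespace):

    // wait_ready.go - hedged approximation of the "running and ready" wait.
    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/util/wait"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    // waitForPodRunningAndReady polls the pod until Phase==Running and the
    // Ready condition is True, mirroring the "Running (Ready = false)" lines above.
    func waitForPodRunningAndReady(ctx context.Context, c kubernetes.Interface, ns, name string, timeout time.Duration) error {
    	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
    		pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
    		if err != nil {
    			return false, err
    		}
    		if pod.Status.Phase != v1.PodRunning {
    			return false, nil // still Pending, keep polling
    		}
    		for _, cond := range pod.Status.Conditions {
    			if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
    				return true, nil
    			}
    		}
    		return false, nil // Running but the readiness probe has not passed yet
    	})
    }

    func main() {
    	config, err := clientcmd.BuildConfigFromFlags("", "/workspace/.kube/config") // kubeconfig path from the log
    	if err != nil {
    		panic(err)
    	}
    	client := kubernetes.NewForConfigOrDie(config)
    	if err := waitForPodRunningAndReady(context.Background(), client, "esipp-1752", "netserver-0", 5*time.Minute); err != nil {
    		panic(err)
    	}
    	fmt.Println("netserver-0 is running and ready")
    }
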
Elapsed: 1m22.184017542s Nov 26 03:33:01.655: INFO: The phase of Pod netserver-0 is Running (Ready = true) Nov 26 03:33:01.655: INFO: Pod "netserver-0" satisfied condition "running and ready" Nov 26 03:33:01.756: INFO: Waiting up to 5m0s for pod "netserver-1" in namespace "esipp-1752" to be "running and ready" Nov 26 03:33:01.809: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 53.082736ms Nov 26 03:33:01.809: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:03.860: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2.103844248s Nov 26 03:33:03.860: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:05.861: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 4.105068243s Nov 26 03:33:05.861: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:07.866: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 6.109244748s Nov 26 03:33:07.866: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 7m2.346s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 6m20.014s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 1m29.176s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef890, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc0042dd8f3, 0xb}, {0x75ee704, 0x11}, 0xc0009ea220?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc0042dd8f3?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:33:09.956: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 8.199461296s Nov 26 03:33:09.956: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:11.864: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 10.10798078s Nov 26 03:33:11.864: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:13.866: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 12.109644417s Nov 26 03:33:13.866: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:15.854: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 14.097979232s Nov 26 03:33:15.854: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:17.858: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 16.102087117s Nov 26 03:33:17.858: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:19.966: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 18.209191071s Nov 26 03:33:19.966: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:21.865: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 20.108367804s Nov 26 03:33:21.865: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:23.865: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 22.108394944s Nov 26 03:33:23.865: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:25.887: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 24.130262183s Nov 26 03:33:25.887: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:27.876: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 26.11917442s Nov 26 03:33:27.876: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 7m22.348s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 6m40.017s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 1m49.178s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef890, 0x2fdb16a?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc0042dd8f3, 0xb}, {0x75ee704, 0x11}, 0xc0009ea220?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc0042dd8f3?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:33:30.080: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 28.323931761s Nov 26 03:33:30.080: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:31.859: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 30.10269837s Nov 26 03:33:31.859: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:33.872: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 32.115152223s Nov 26 03:33:33.872: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:35.855: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 34.098602503s Nov 26 03:33:35.855: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:37.868: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 36.11153046s Nov 26 03:33:37.868: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:39.869: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 38.112829873s Nov 26 03:33:39.869: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:41.864: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. 
Elapsed: 40.107414444s Nov 26 03:33:41.864: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:43.955: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 42.198164152s Nov 26 03:33:43.955: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:45.881: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 44.12420805s Nov 26 03:33:45.881: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:47.868: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 46.111274451s Nov 26 03:33:47.868: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 7m42.35s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 7m0.019s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 2m9.18s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef890, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc0042dd8f3, 0xb}, {0x75ee704, 0x11}, 0xc0009ea220?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc0042dd8f3?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:33:49.885: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 48.128327261s Nov 26 03:33:49.885: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:51.876: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 50.119694605s Nov 26 03:33:51.876: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:53.864: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 52.107474491s Nov 26 03:33:53.864: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:55.859: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 54.102959693s Nov 26 03:33:55.859: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:33:57.860: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 56.103344917s Nov 26 03:33:57.860: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:00.014: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 58.257951117s Nov 26 03:34:00.014: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:01.870: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m0.113379502s Nov 26 03:34:01.870: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:03.868: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m2.111349977s Nov 26 03:34:03.868: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:05.863: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m4.107007873s Nov 26 03:34:05.863: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:07.865: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m6.108324679s Nov 26 03:34:07.865: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 8m2.352s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 7m20.021s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 2m29.183s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef890, 0x2fdb16a?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc0042dd8f3, 0xb}, {0x75ee704, 0x11}, 0xc0009ea220?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc0042dd8f3?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:34:10.070: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m8.314113665s Nov 26 03:34:10.070: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:11.867: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m10.110502107s Nov 26 03:34:11.867: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:13.867: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m12.110780459s Nov 26 03:34:13.867: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:15.855: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m14.098823206s Nov 26 03:34:15.855: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:17.856: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m16.09918274s Nov 26 03:34:17.856: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:20.083: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m18.32691971s Nov 26 03:34:20.083: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:21.892: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. 
Elapsed: 1m20.135248631s Nov 26 03:34:21.892: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:23.922: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m22.16592159s Nov 26 03:34:23.922: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:25.873: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m24.116976687s Nov 26 03:34:25.873: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:27.860: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m26.103545013s Nov 26 03:34:27.860: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 8m22.355s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 7m40.023s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 2m49.185s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef890, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc0042dd8f3, 0xb}, {0x75ee704, 0x11}, 0xc0009ea220?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc0042dd8f3?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:34:30.040: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m28.283786755s Nov 26 03:34:30.040: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:31.872: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m30.11574286s Nov 26 03:34:31.872: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:33.869: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m32.112591674s Nov 26 03:34:33.869: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:35.866: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m34.109647034s Nov 26 03:34:35.866: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:37.909: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m36.152229267s Nov 26 03:34:37.909: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:39.856: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m38.099828319s Nov 26 03:34:39.856: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:41.862: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m40.10583188s Nov 26 03:34:41.862: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:43.880: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m42.123799698s Nov 26 03:34:43.880: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:45.876: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m44.119794744s Nov 26 03:34:45.876: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:47.861: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m46.104921906s Nov 26 03:34:47.861: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 8m42.357s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 8m0.025s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 3m9.187s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef890, 0x2fdb16a?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc0042dd8f3, 0xb}, {0x75ee704, 0x11}, 0xc0009ea220?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc0042dd8f3?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:34:49.874: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m48.118011977s Nov 26 03:34:49.874: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:51.865: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m50.108457778s Nov 26 03:34:51.865: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:53.870: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m52.11349875s Nov 26 03:34:53.870: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:55.861: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m54.105147362s Nov 26 03:34:55.862: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:57.861: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m56.104309038s Nov 26 03:34:57.861: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:34:59.902: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 1m58.145227329s Nov 26 03:34:59.902: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:01.855: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. 
Elapsed: 2m0.098908574s Nov 26 03:35:01.855: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:03.853: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2m2.096693347s Nov 26 03:35:03.853: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:05.851: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2m4.09470279s Nov 26 03:35:05.851: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:07.853: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2m6.096155639s Nov 26 03:35:07.853: INFO: The phase of Pod netserver-1 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 9m2.36s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 8m20.028s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 3m29.19s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef890, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc0042dd8f3, 0xb}, {0x75ee704, 0x11}, 0xc0009ea220?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc0042dd8f3?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:35:09.851: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2m8.09471526s Nov 26 03:35:09.851: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:11.852: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2m10.095180744s Nov 26 03:35:11.852: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:13.865: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2m12.108652847s Nov 26 03:35:13.865: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:15.852: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2m14.095224912s Nov 26 03:35:15.852: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:17.852: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2m16.095282366s Nov 26 03:35:17.852: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:19.852: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=false. Elapsed: 2m18.095342713s Nov 26 03:35:19.852: INFO: The phase of Pod netserver-1 is Running (Ready = false) Nov 26 03:35:21.852: INFO: Pod "netserver-1": Phase="Running", Reason="", readiness=true. Elapsed: 2m20.09517245s Nov 26 03:35:21.852: INFO: The phase of Pod netserver-1 is Running (Ready = true) Nov 26 03:35:21.852: INFO: Pod "netserver-1" satisfied condition "running and ready" Nov 26 03:35:21.893: INFO: Waiting up to 5m0s for pod "netserver-2" in namespace "esipp-1752" to be "running and ready" Nov 26 03:35:21.934: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 40.639154ms Nov 26 03:35:21.934: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:23.976: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 2.083004668s Nov 26 03:35:23.976: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:25.979: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 4.085585859s Nov 26 03:35:25.979: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:27.975: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. 
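The reason each netserver pod sits in Running (Ready = false) for a minute or more is its readiness probe: the pod is only marked Ready once its HTTP health check starts succeeding. Purely as a hypothetical illustration of such a pod (container name, image tag, port, probe path and timings are assumptions, not copied from the framework's netserver spec):

    // netserver_sketch.go - hypothetical pod roughly in the shape of the
    // framework's netserver pods; all details here are assumed.
    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    func netserverPod(name, namespace string) *v1.Pod {
    	return &v1.Pod{
    		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
    		Spec: v1.PodSpec{
    			Containers: []v1.Container{{
    				Name:  "webserver",                                      // container name assumed
    				Image: "registry.k8s.io/e2e-test-images/agnhost:2.40",   // image tag assumed
    				Args:  []string{"netexec", "--http-port=8083"},          // port assumed
    				ReadinessProbe: &v1.Probe{
    					// Ready flips to true only once this probe succeeds, which is
    					// why the log shows long stretches of Running (Ready = false).
    					ProbeHandler: v1.ProbeHandler{
    						HTTPGet: &v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8083)},
    					},
    					InitialDelaySeconds: 10,
    					PeriodSeconds:       10,
    				},
    			}},
    		},
    	}
    }

    func main() {
    	fmt.Printf("%+v\n", netserverPod("netserver-0", "esipp-1752"))
    }
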
Elapsed: 6.081778904s Nov 26 03:35:27.975: INFO: The phase of Pod netserver-2 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 9m22.362s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 8m40.03s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 3m49.192s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef698, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc003495d13, 0xb}, {0x75ee704, 0x11}, 0xc0013dacb0?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc003495d13?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:35:29.975: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 8.082296396s Nov 26 03:35:29.975: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:31.975: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 10.081576996s Nov 26 03:35:31.975: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:33.976: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. 
Elapsed: 12.082993324s Nov 26 03:35:33.976: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:35.981: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 14.087594346s Nov 26 03:35:35.981: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:37.993: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 16.099801179s Nov 26 03:35:37.993: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:39.993: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 18.099632956s Nov 26 03:35:39.993: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:41.976: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 20.083250399s Nov 26 03:35:41.976: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:43.977: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 22.083872022s Nov 26 03:35:43.977: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:45.980: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 24.086464069s Nov 26 03:35:45.980: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:47.976: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 26.083214199s Nov 26 03:35:47.976: INFO: The phase of Pod netserver-2 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 9m42.364s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 9m0.033s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 4m9.195s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef698, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc003495d13, 0xb}, {0x75ee704, 0x11}, 0xc0013dacb0?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc003495d13?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:35:49.976: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 28.083035449s Nov 26 03:35:49.976: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:51.976: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 30.082500473s Nov 26 03:35:51.976: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:53.976: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 32.082700848s Nov 26 03:35:53.976: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:55.976: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 34.0830228s Nov 26 03:35:55.976: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:57.976: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 36.083130463s Nov 26 03:35:57.976: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:35:59.977: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 38.083503677s Nov 26 03:35:59.977: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:01.977: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 40.083820862s Nov 26 03:36:01.977: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:03.975: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 42.082131913s Nov 26 03:36:03.975: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:05.975: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 44.08217579s Nov 26 03:36:05.975: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:07.974: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 46.081326443s Nov 26 03:36:07.974: INFO: The phase of Pod netserver-2 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 10m2.367s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 9m20.036s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 4m29.197s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef698, 0x2fdb16a?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc003495d13, 0xb}, {0x75ee704, 0x11}, 0xc0013dacb0?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc003495d13?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:36:09.975: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 48.081628386s Nov 26 03:36:09.975: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:12.282: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 50.389349967s Nov 26 03:36:12.282: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:14.016: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 52.123331975s Nov 26 03:36:14.016: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:15.988: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 54.094528158s Nov 26 03:36:15.988: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:17.987: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 56.094051515s Nov 26 03:36:17.987: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:20.058: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 58.164422972s Nov 26 03:36:20.058: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:21.996: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. 
Elapsed: 1m0.102526958s Nov 26 03:36:21.996: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:23.994: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m2.101101813s Nov 26 03:36:23.994: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:26.024: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m4.130860469s Nov 26 03:36:26.024: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:27.981: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m6.087731976s Nov 26 03:36:27.981: INFO: The phase of Pod netserver-2 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 10m22.37s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 9m40.038s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 4m49.2s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef698, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc003495d13, 0xb}, {0x75ee704, 0x11}, 0xc0013dacb0?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc003495d13?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) 
test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:36:29.994: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m8.100691898s Nov 26 03:36:29.994: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:32.005: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m10.112147785s Nov 26 03:36:32.005: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:33.989: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m12.0962124s Nov 26 03:36:33.989: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:35.986: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m14.093351628s Nov 26 03:36:35.986: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:38.037: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m16.143889745s Nov 26 03:36:38.037: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:39.993: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m18.100122708s Nov 26 03:36:39.993: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:41.983: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m20.089590777s Nov 26 03:36:41.983: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:44.010: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m22.116486654s Nov 26 03:36:44.010: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:45.985: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m24.091929813s Nov 26 03:36:45.985: INFO: The phase of Pod netserver-2 is Running (Ready = false) Nov 26 03:36:47.985: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=false. Elapsed: 1m26.092031888s Nov 26 03:36:47.985: INFO: The phase of Pod netserver-2 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #9 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=LoadBalancer (Spec Runtime: 10m42.372s) test/e2e/network/loadbalancer.go:1266 In [It] (Node Runtime: 10m0.04s) test/e2e/network/loadbalancer.go:1266 At [By Step] Creating the service pods in kubernetes (Step Runtime: 5m9.202s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 3271 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0000820c8}, 0xc0029ef698, 0x2fdb16a?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0000820c8}, 0x70?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0000820c8}, 0x75b521a?, 0xc000bf55c0?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc004292680}, {0xc002a507e0, 0xa}, {0xc003495d13, 0xb}, {0x75ee704, 0x11}, 0xc0013dacb0?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc004292680?}, {0xc003495d13?, 0x0?}, {0xc002a507e0?, 0x0?}, 0xc0000da660?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc000ea0380, {0x75c6f7c, 0x9}, 0xc001e04b40) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 > k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00130ed80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:36:50.043: INFO: Pod "netserver-2": Phase="Running", Reason="", readiness=true. Elapsed: 1m28.149589886s Nov 26 03:36:50.043: INFO: The phase of Pod netserver-2 is Running (Ready = true) Nov 26 03:36:50.043: INFO: Pod "netserver-2" satisfied condition "running and ready" STEP: Creating test pods 11/26/22 03:36:50.096 Nov 26 03:36:50.246: INFO: Waiting up to 5m0s for pod "test-container-pod" in namespace "esipp-1752" to be "running" Nov 26 03:36:50.333: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 86.992164ms Nov 26 03:36:52.469: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 2.223066768s Nov 26 03:36:54.550: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 4.304113789s Nov 26 03:36:56.439: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. Elapsed: 6.193119952s Nov 26 03:36:58.435: INFO: Pod "test-container-pod": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.188555672s Nov 26 03:37:00.373: INFO: Encountered non-retryable error while getting pod esipp-1752/test-container-pod: Get "https://34.168.169.190/api/v1/namespaces/esipp-1752/pods/test-container-pod": dial tcp 34.168.169.190:443: connect: connection refused Nov 26 03:37:00.373: INFO: Unexpected error: <*fmt.wrapError | 0xc0013879c0>: { msg: "error while waiting for pod esipp-1752/test-container-pod to be running: Get \"https://34.168.169.190/api/v1/namespaces/esipp-1752/pods/test-container-pod\": dial tcp 34.168.169.190:443: connect: connection refused", err: <*url.Error | 0xc003706a80>{ Op: "Get", URL: "https://34.168.169.190/api/v1/namespaces/esipp-1752/pods/test-container-pod", Err: <*net.OpError | 0xc0026186e0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc001b02360>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 169, 190], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc001387980>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, }, } Nov 26 03:37:00.373: FAIL: error while waiting for pod esipp-1752/test-container-pod to be running: Get "https://34.168.169.190/api/v1/namespaces/esipp-1752/pods/test-container-pod": dial tcp 34.168.169.190:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createTestPods(0xc000ea0380) test/e2e/framework/network/utils.go:727 +0x13e k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc000ea0380, 0x7fe696fd0ef8?) test/e2e/framework/network/utils.go:766 +0x9f k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc000ea0380, 0x3c?) test/e2e/framework/network/utils.go:778 +0x3e k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc000cc8000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.3.1() test/e2e/network/loadbalancer.go:1285 +0x10a k8s.io/kubernetes/test/e2e/network.glob..func20.3() test/e2e/network/loadbalancer.go:1312 +0x37f [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 03:37:00.374: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 Nov 26 03:37:00.413: INFO: Output of kubectl describe svc: Nov 26 03:37:00.413: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-1752 describe svc --namespace=esipp-1752' Nov 26 03:37:00.552: INFO: rc: 1 Nov 26 03:37:00.552: INFO: [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 03:37:00.553 STEP: Collecting events from namespace "esipp-1752". 
11/26/22 03:37:00.553 Nov 26 03:37:00.593: INFO: Unexpected error: failed to list events in namespace "esipp-1752": <*url.Error | 0xc00329a960>: { Op: "Get", URL: "https://34.168.169.190/api/v1/namespaces/esipp-1752/events", Err: <*net.OpError | 0xc001cb32c0>{ Op: "dial", Net: "tcp", Source: nil, Addr: <*net.TCPAddr | 0xc00329a930>{ IP: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 34, 168, 169, 190], Port: 443, Zone: "", }, Err: <*os.SyscallError | 0xc0036758a0>{ Syscall: "connect", Err: <syscall.Errno>0x6f, }, }, } Nov 26 03:37:00.593: FAIL: failed to list events in namespace "esipp-1752": Get "https://34.168.169.190/api/v1/namespaces/esipp-1752/events": dial tcp 34.168.169.190:443: connect: connection refused Full Stack Trace k8s.io/kubernetes/test/e2e/framework/debug.dumpEventsInNamespace(0xc000bf05c0, {0xc002a507e0, 0xa}) test/e2e/framework/debug/dump.go:44 +0x191 k8s.io/kubernetes/test/e2e/framework/debug.DumpAllNamespaceInfo({0x801de88, 0xc004292680}, {0xc002a507e0, 0xa}) test/e2e/framework/debug/dump.go:62 +0x8d k8s.io/kubernetes/test/e2e/framework/debug/init.init.0.func1.1(0xc000bf0650?, {0xc002a507e0?, 0x7fa7740?}) test/e2e/framework/debug/init/init.go:34 +0x32 k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo.func1() test/e2e/framework/framework.go:274 +0x6d k8s.io/kubernetes/test/e2e/framework.(*Framework).dumpNamespaceInfo(0xc000cc8000) test/e2e/framework/framework.go:271 +0x179 reflect.Value.call({0x6627cc0?, 0xc0013b32d0?, 0xc0041b1f50?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc0013b32d0?, 0x7fadfa0?}, {0xae73300?, 0xc0041b1f80?, 0x26225bd?}) /usr/local/go/src/reflect/value.go:368 +0xbc [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "esipp-1752" for this suite. 11/26/22 03:37:00.593 Nov 26 03:37:00.633: FAIL: Couldn't delete ns: "esipp-1752": Delete "https://34.168.169.190/api/v1/namespaces/esipp-1752": dial tcp 34.168.169.190:443: connect: connection refused (&url.Error{Op:"Delete", URL:"https://34.168.169.190/api/v1/namespaces/esipp-1752", Err:(*net.OpError)(0xc002618b90)}) Full Stack Trace k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach.func1() test/e2e/framework/framework.go:370 +0x4fe k8s.io/kubernetes/test/e2e/framework.(*Framework).AfterEach(0xc000cc8000) test/e2e/framework/framework.go:383 +0x1ca reflect.Value.call({0x6627cc0?, 0xc0013b3230?, 0xc003bb1fb0?}, {0x75b6e72, 0x4}, {0xae73300, 0x0, 0x0?}) /usr/local/go/src/reflect/value.go:584 +0x8c5 reflect.Value.Call({0x6627cc0?, 0xc0013b3230?, 0x0?}, {0xae73300?, 0x5?, 0xc0036ab9f8?}) /usr/local/go/src/reflect/value.go:368 +0xbc
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\swork\sfor\stype\=NodePort$'
test/e2e/framework/network/utils.go:866 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc00105fc00, {0x75c6f7c, 0x9}, 0xc0036328a0) test/e2e/framework/network/utils.go:866 +0x1d0 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc00105fc00, 0x7fc6303d7438?) test/e2e/framework/network/utils.go:763 +0x55 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc00105fc00, 0x3c?) test/e2e/framework/network/utils.go:778 +0x3e k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001318000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.4() test/e2e/network/loadbalancer.go:1332 +0x145 from junit_01.xml
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 03:06:16.499 Nov 26 03:06:16.499: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 03:06:16.501 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 03:06:16.714 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 03:06:16.803 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1250 [It] should work for type=NodePort test/e2e/network/loadbalancer.go:1314 STEP: creating a service esipp-8765/external-local-nodeport with type=NodePort and ExternalTrafficPolicy=Local 11/26/22 03:06:17.023 STEP: creating a pod to be part of the service external-local-nodeport 11/26/22 03:06:17.107 Nov 26 03:06:17.182: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 03:06:17.257: INFO: Found all 1 pods Nov 26 03:06:17.257: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [external-local-nodeport-mp5bd] Nov 26 03:06:17.257: INFO: Waiting up to 2m0s for pod "external-local-nodeport-mp5bd" in namespace "esipp-8765" to be "running and ready" Nov 26 03:06:17.329: INFO: Pod "external-local-nodeport-mp5bd": Phase="Pending", Reason="", readiness=false. Elapsed: 72.216627ms Nov 26 03:06:17.329: INFO: Error evaluating pod condition running and ready: want pod 'external-local-nodeport-mp5bd' on 'bootstrap-e2e-minion-group-8wdk' to be 'Running' but was 'Pending' Nov 26 03:06:19.398: INFO: Pod "external-local-nodeport-mp5bd": Phase="Running", Reason="", readiness=true. Elapsed: 2.141688245s Nov 26 03:06:19.399: INFO: Pod "external-local-nodeport-mp5bd" satisfied condition "running and ready" Nov 26 03:06:19.399: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [external-local-nodeport-mp5bd] STEP: Performing setup for networking test in namespace esipp-8765 11/26/22 03:06:20.566 STEP: creating a selector 11/26/22 03:06:20.566 STEP: Creating the service pods in kubernetes 11/26/22 03:06:20.566 Nov 26 03:06:20.566: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable Nov 26 03:06:20.910: INFO: Waiting up to 5m0s for pod "netserver-0" in namespace "esipp-8765" to be "running and ready" Nov 26 03:06:20.969: INFO: Pod "netserver-0": Phase="Pending", Reason="", readiness=false. Elapsed: 58.693114ms Nov 26 03:06:20.969: INFO: The phase of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) Nov 26 03:06:23.040: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2.130164993s Nov 26 03:06:23.040: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:25.034: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4.123582957s Nov 26 03:06:25.034: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:27.022: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 6.11156029s Nov 26 03:06:27.022: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:29.129: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 8.219087654s Nov 26 03:06:29.129: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:31.031: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 10.120451635s Nov 26 03:06:31.031: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:33.107: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 12.197169707s Nov 26 03:06:33.107: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:35.061: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 14.151003429s Nov 26 03:06:35.061: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:37.032: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 16.121727508s Nov 26 03:06:37.032: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:39.036: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 18.125835692s Nov 26 03:06:39.036: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:41.027: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 20.117220491s Nov 26 03:06:41.027: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:43.028: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 22.117422626s Nov 26 03:06:43.028: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:45.081: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 24.171215483s Nov 26 03:06:45.081: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:47.022: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 26.111736763s Nov 26 03:06:47.022: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:49.113: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 28.203161923s Nov 26 03:06:49.113: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:51.021: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 30.111161322s Nov 26 03:06:51.021: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:53.048: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 32.137842114s Nov 26 03:06:53.048: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:55.054: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 34.144027565s Nov 26 03:06:55.054: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:57.052: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 36.142390088s Nov 26 03:06:57.053: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:06:59.048: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 38.137963636s Nov 26 03:06:59.048: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:01.035: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 40.125256516s Nov 26 03:07:01.035: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:03.028: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 42.117614525s Nov 26 03:07:03.028: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:05.023: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 44.112906229s Nov 26 03:07:05.023: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:07.076: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 46.166317621s Nov 26 03:07:07.076: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:09.025: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 48.114765962s Nov 26 03:07:09.025: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:11.018: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 50.108205185s Nov 26 03:07:11.018: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:38.323: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m17.412577413s Nov 26 03:07:38.323: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:39.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m18.100720097s Nov 26 03:07:39.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:41.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m20.100581664s Nov 26 03:07:41.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:43.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m22.100521684s Nov 26 03:07:43.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:45.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m24.10115666s Nov 26 03:07:45.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:47.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m26.10028516s Nov 26 03:07:47.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:49.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m28.100647626s Nov 26 03:07:49.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:51.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m30.100430328s Nov 26 03:07:51.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:53.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m32.100752507s Nov 26 03:07:53.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:55.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m34.100254902s Nov 26 03:07:55.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:57.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m36.100781882s Nov 26 03:07:57.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:07:59.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m38.100733431s Nov 26 03:07:59.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:01.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m40.100642046s Nov 26 03:08:01.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:03.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m42.100571264s Nov 26 03:08:03.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:05.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 1m44.100866493s Nov 26 03:08:05.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:07.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m46.100720263s Nov 26 03:08:07.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:09.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m48.099883328s Nov 26 03:08:09.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:11.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m50.101000038s Nov 26 03:08:11.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:13.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m52.100041952s Nov 26 03:08:13.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:15.012: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m54.102318027s Nov 26 03:08:15.012: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:17.012: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m56.101661108s Nov 26 03:08:17.012: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:19.012: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 1m58.102041766s Nov 26 03:08:19.012: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:21.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m0.100593291s Nov 26 03:08:21.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:23.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m2.100886366s Nov 26 03:08:23.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:25.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m4.100764743s Nov 26 03:08:25.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:27.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m6.101114971s Nov 26 03:08:27.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:29.012: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m8.101908538s Nov 26 03:08:29.012: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:31.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m10.099764322s Nov 26 03:08:31.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:33.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m12.10037305s Nov 26 03:08:33.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:35.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m14.101135144s Nov 26 03:08:35.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:37.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m16.100474834s Nov 26 03:08:37.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:39.047: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 2m18.137105056s Nov 26 03:08:39.047: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:41.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m20.099796868s Nov 26 03:08:41.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:43.017: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m22.10657237s Nov 26 03:08:43.017: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:45.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m24.100158527s Nov 26 03:08:45.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:47.030: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m26.12015214s Nov 26 03:08:47.030: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:49.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m28.100701254s Nov 26 03:08:49.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:51.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m30.100273067s Nov 26 03:08:51.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:53.028: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m32.118382124s Nov 26 03:08:53.029: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:55.052: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m34.141728302s Nov 26 03:08:55.052: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:57.029: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m36.118680947s Nov 26 03:08:57.029: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:08:59.048: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m38.138299342s Nov 26 03:08:59.048: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:01.018: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m40.108202814s Nov 26 03:09:01.018: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:03.030: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m42.11972655s Nov 26 03:09:03.030: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:05.043: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m44.133380485s Nov 26 03:09:05.044: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:07.032: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m46.121857351s Nov 26 03:09:07.032: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:09.119: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m48.208936123s Nov 26 03:09:09.119: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:11.045: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m50.134479369s Nov 26 03:09:11.045: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:13.023: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 2m52.113259242s Nov 26 03:09:13.023: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:15.026: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m54.116354093s Nov 26 03:09:15.026: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:17.019: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m56.10924211s Nov 26 03:09:17.019: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:19.025: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 2m58.115241349s Nov 26 03:09:19.025: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:21.021: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m0.111041147s Nov 26 03:09:21.021: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:23.028: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m2.118118069s Nov 26 03:09:23.028: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:25.034: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m4.123503357s Nov 26 03:09:25.034: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:27.016: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m6.105799928s Nov 26 03:09:27.016: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:29.027: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m8.117191531s Nov 26 03:09:29.027: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:31.062: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m10.152054734s Nov 26 03:09:31.062: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:33.034: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m12.123402626s Nov 26 03:09:33.034: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:35.031: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m14.121364232s Nov 26 03:09:35.031: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:37.044: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m16.134204726s Nov 26 03:09:37.044: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:39.084: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m18.173945977s Nov 26 03:09:39.084: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:41.030: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m20.119529873s Nov 26 03:09:41.030: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:43.047: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m22.137308764s Nov 26 03:09:43.047: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:45.060: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m24.15005144s Nov 26 03:09:45.060: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:47.055: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 3m26.144451678s Nov 26 03:09:47.055: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:49.024: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m28.113540345s Nov 26 03:09:49.024: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:51.027: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m30.11670485s Nov 26 03:09:51.027: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:53.048: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m32.138326367s Nov 26 03:09:53.048: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:55.034: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m34.123536016s Nov 26 03:09:55.034: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:57.037: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m36.127090621s Nov 26 03:09:57.037: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:09:59.071: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m38.160914739s Nov 26 03:09:59.071: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:01.032: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m40.12170933s Nov 26 03:10:01.032: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:03.019: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m42.108748774s Nov 26 03:10:03.019: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:05.042: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m44.131797911s Nov 26 03:10:05.042: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:07.021: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m46.110601074s Nov 26 03:10:07.021: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:09.031: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m48.121158103s Nov 26 03:10:09.031: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:11.018: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m50.107956778s Nov 26 03:10:11.018: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:13.025: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m52.115178225s Nov 26 03:10:13.025: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:15.150: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m54.239562899s Nov 26 03:10:15.150: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:17.036: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m56.125858549s Nov 26 03:10:17.036: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:19.026: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 3m58.116165946s Nov 26 03:10:19.026: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:21.086: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 4m0.17605167s Nov 26 03:10:21.086: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:23.026: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m2.115766957s Nov 26 03:10:23.026: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:25.033: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m4.122569111s Nov 26 03:10:25.033: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:27.046: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m6.136141161s Nov 26 03:10:27.046: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:29.012: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m8.102179176s Nov 26 03:10:29.012: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:31.022: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m10.11210094s Nov 26 03:10:31.022: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:33.054: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m12.143920936s Nov 26 03:10:33.054: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:35.034: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m14.123506546s Nov 26 03:10:35.034: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:37.030: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m16.119449764s Nov 26 03:10:37.030: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:39.038: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m18.12826024s Nov 26 03:10:39.038: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:41.024: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m20.113955164s Nov 26 03:10:41.024: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:43.026: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m22.115837543s Nov 26 03:10:43.026: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:45.035: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m24.125373625s Nov 26 03:10:45.036: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:47.018: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m26.107463163s Nov 26 03:10:47.018: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:49.059: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m28.14898841s Nov 26 03:10:49.059: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:51.036: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m30.126165041s Nov 26 03:10:51.036: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:53.015: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m32.105163675s Nov 26 03:10:53.015: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:55.112: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 4m34.202055146s Nov 26 03:10:55.112: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:57.015: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m36.1048833s Nov 26 03:10:57.015: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:10:59.051: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m38.140470377s Nov 26 03:10:59.051: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:01.052: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m40.141603282s Nov 26 03:11:01.052: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:03.061: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m42.150523661s Nov 26 03:11:03.061: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:05.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m44.099991421s Nov 26 03:11:05.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:07.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m46.099415151s Nov 26 03:11:07.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:09.013: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m48.10288103s Nov 26 03:11:09.013: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:11.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m50.100102696s Nov 26 03:11:11.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:13.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m52.10051798s Nov 26 03:11:13.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:15.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m54.100103772s Nov 26 03:11:15.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:17.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m56.100787167s Nov 26 03:11:17.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) ------------------------------ Progress Report for Ginkgo Process #3 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work for type=NodePort (Spec Runtime: 5m0.525s) test/e2e/network/loadbalancer.go:1314 In [It] (Node Runtime: 5m0.001s) test/e2e/network/loadbalancer.go:1314 At [By Step] Creating the service pods in kubernetes (Step Runtime: 4m56.458s) test/e2e/framework/network/utils.go:761 Spec Goroutine goroutine 2580 [select] k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0001b0000}, 0xc00452bb48, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:660 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0001b0000}, 0x48?, 0x2fd9d05?, 0x70?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0001b0000}, 0x75b521a?, 0xc003d87698?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0x75b6f82?, 0x4?, 0x76f3c92?) 
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 k8s.io/kubernetes/test/e2e/framework/pod.WaitForPodCondition({0x801de88?, 0xc0041aa9c0}, {0xc003408330, 0xa}, {0xc003625f83, 0xb}, {0x75ee704, 0x11}, 0x7f8f401?, 0x7895ad0) test/e2e/framework/pod/wait.go:290 k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodReadyInNamespace({0x801de88?, 0xc0041aa9c0?}, {0xc003625f83?, 0xc004a75640?}, {0xc003408330?, 0xc003ec78e0?}, 0x271e5fe?) test/e2e/framework/pod/wait.go:564 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc00105fc00, {0x75c6f7c, 0x9}, 0xc0036328a0) test/e2e/framework/network/utils.go:866 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc00105fc00, 0x7fc6303d7438?) test/e2e/framework/network/utils.go:763 > k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc00105fc00, 0x3c?) test/e2e/framework/network/utils.go:778 > k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001318000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 > k8s.io/kubernetes/test/e2e/network.glob..func20.4() test/e2e/network/loadbalancer.go:1332 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000e58000}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:11:19.011: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 4m58.100749931s Nov 26 03:11:19.011: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:21.010: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. Elapsed: 5m0.10012249s Nov 26 03:11:21.010: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:21.051: INFO: Pod "netserver-0": Phase="Running", Reason="", readiness=false. 
Elapsed: 5m0.140676448s Nov 26 03:11:21.051: INFO: The phase of Pod netserver-0 is Running (Ready = false) Nov 26 03:11:21.053: INFO: Unexpected error: <*pod.timeoutError | 0xc001ee5560>: { msg: "timed out while waiting for pod esipp-8765/netserver-0 to be running and ready", observedObjects: [ <*v1.Pod | 0xc004079b00>{ TypeMeta: {Kind: "", APIVersion: ""}, ObjectMeta: { Name: "netserver-0", GenerateName: "", Namespace: "esipp-8765", SelfLink: "", UID: "2b794c5a-9fae-4ca9-ad90-23861d1c2bd1", ResourceVersion: "9238", Generation: 0, CreationTimestamp: { Time: { wall: 0, ext: 63805028780, loc: { name: "Local", zone: [ {name: "UTC", offset: 0, isDST: false}, ], tx: [ { when: -576460752303423488, index: 0, isstd: false, isutc: false, }, ], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: "UTC", offset: 0, isDST: false}, }, }, }, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: { "selector-ac4a8122-23b0-402b-beb0-b4ba936cb08f": "true", }, Annotations: nil, OwnerReferences: nil, Finalizers: nil, ManagedFields: [ { Manager: "e2e.test", Operation: "Update", APIVersion: "v1", Time: { Time: { wall: 0, ext: 63805028780, loc: { name: "Local", zone: [...], tx: [...], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: ..., offset: ..., isDST: ...}, }, }, }, FieldsType: "FieldsV1", FieldsV1: { Raw: "{\"f:metadata\":{\"f:labels\":{\".\":{},\"f:selector-ac4a8122-23b0-402b-beb0-b4ba936cb08f\":{}}},\"f:spec\":{\"f:containers\":{\"k:{\\\"name\\\":\\\"webserver\\\"}\":{\".\":{},\"f:args\":{},\"f:image\":{},\"f:imagePullPolicy\":{},\"f:livenessProbe\":{\".\":{},\"f:failureThreshold\":{},\"f:httpGet\":{\".\":{},\"f:path\":{},\"f:port\":{},\"f:scheme\":{}},\"f:initialDelaySeconds\":{},\"f:periodSeconds\":{},\"f:successThreshold\":{},\"f:timeoutSeconds\":{}},\"f:name\":{},\"f:ports\":{\".\":{},\"k:{\\\"containerPort\\\":8081,\\\"protocol\\\":\\\"UDP\\\"}\":{\".\":{},\"f:containerPort\":{},\"f:name\":{},\"f:protocol\":{}},\"k:{\\\"containerPort\\\":8083,\\\"protocol\\\":\\\"TCP\\\"}\":{\".\":{},\"f:containerPort\":{},\"f:name\":{},\"f:protocol\":{}}},\"f:readinessProbe\":{\".\":{},\"f:failureThreshold\":{},\"f:httpGet\":{\".\":{},\"f:path\":{},\"f:port\":{},\"f:scheme\":{}},\"f:initialDelaySeconds\":{},\"f:periodSeconds\":{},\"f:successThreshold\":{},\"f:timeoutSeconds\":{}},\"f:resources\":{},\"f:terminationMessagePath\":{},\"f:terminationMessagePolicy\":{}}},\"f:dnsPolicy\":{},\"f:enableServiceLinks\":{},\"f:nodeSelector\":{},\"f:restartPolicy\":{},\"f:schedulerName\":{},\"f:securityContext\":{},\"f:terminationGracePeriodSeconds\":{}}}", }, Subresource: "", }, { Manager: "kubelet", Operation: "Update", APIVersion: "v1", Time: { Time: { wall: 0, ext: 63805028983, loc: { name: "Local", zone: [...], tx: [...], extend: "UTC0", cacheStart: 9223372036854775807, cacheEnd: 9223372036854775807, cacheZone: {name: ..., offset: ..., isDST: ...}, }, }, }, FieldsType: "FieldsV1", FieldsV1: { Raw: 
"{\"f:status\":{\"f:conditions\":{\"k:{\\\"type\\\":\\\"ContainersReady\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:message\":{},\"f:reason\":{},\"f:status\":{},\"f:type\":{}},\"k:{\\\"type\\\":\\\"Initialized\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:status\":{},\"f:type\":{}},\"k:{\\\"type\\\":\\\"Ready\\\"}\":{\".\":{},\"f:lastProbeTime\":{},\"f:lastTransitionTime\":{},\"f:message\":{},\"f:reason\":{},\"f:status\":{},\"f:type\":{}}},\"f:containerStatuses\":{},\"f:hostIP\":{},\"f:phase\":{},\"f:podIP\":{},\"f:podIPs\":{\".\":{},\"k:{\\\"ip\\\":\\\"10.64.0.123\\\"}\":{\".\":{},\"f:ip\":{}}},\"f:startTime\":{}}}", }, Subresource: "status", }, ], }, Spec: { Volumes: [ { Name: "kube-api-access-sfvch", VolumeSource: { HostPath: nil, EmptyDir: nil, GCEPersistentDisk: nil, AWSElasticBlockStore: nil, GitRepo: nil, Secret: nil, NFS: nil, ISCSI: nil, Glusterfs: nil, PersistentVolumeClaim: nil, RBD: nil, FlexVolume: nil, Cinder: nil, CephFS: nil, Flocker: nil, DownwardAPI: nil, FC: nil, AzureFile: nil, ConfigMap: nil, VsphereVolume: nil, Quobyte: nil, AzureDisk: nil, PhotonPersistentDisk: nil, Projected: { Sources: [ { Secret: ..., DownwardAPI: ..., ConfigMap: ..., ... Gomega truncated this representation as it exceeds 'format.MaxLength'. Consider having the object provide a custom 'GomegaStringer' representation or adjust the parameters in Gomega's 'format' package. Learn more here: https://onsi.github.io/gomega/#adjusting-output Nov 26 03:11:21.053: FAIL: timed out while waiting for pod esipp-8765/netserver-0 to be running and ready Full Stack Trace k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).createNetProxyPods(0xc00105fc00, {0x75c6f7c, 0x9}, 0xc0036328a0) test/e2e/framework/network/utils.go:866 +0x1d0 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setupCore(0xc00105fc00, 0x7fc6303d7438?) test/e2e/framework/network/utils.go:763 +0x55 k8s.io/kubernetes/test/e2e/framework/network.(*NetworkingTestConfig).setup(0xc00105fc00, 0x3c?) test/e2e/framework/network/utils.go:778 +0x3e k8s.io/kubernetes/test/e2e/framework/network.NewNetworkingTestConfig(0xc001318000, {0x0, 0x0, 0x0?}) test/e2e/framework/network/utils.go:131 +0x125 k8s.io/kubernetes/test/e2e/network.glob..func20.4() test/e2e/network/loadbalancer.go:1332 +0x145 [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/node/init/init.go:32 Nov 26 03:11:21.111: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1260 Nov 26 03:11:21.155: INFO: Output of kubectl describe svc: Nov 26 03:11:21.155: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-8765 describe svc --namespace=esipp-8765' Nov 26 03:11:21.386: INFO: stderr: "No resources found in esipp-8765 namespace.\n" Nov 26 03:11:21.386: INFO: stdout: "" Nov 26 03:11:21.386: INFO: [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 11/26/22 03:11:21.386 STEP: Collecting events from namespace "esipp-8765". 11/26/22 03:11:21.386 STEP: Found 29 events. 
11/26/22 03:11:21.428 Nov 26 03:11:21.428: INFO: At 2022-11-26 03:06:17 +0000 UTC - event for external-local-nodeport: {replication-controller } SuccessfulCreate: Created pod: external-local-nodeport-mp5bd Nov 26 03:11:21.428: INFO: At 2022-11-26 03:06:17 +0000 UTC - event for external-local-nodeport-mp5bd: {default-scheduler } Scheduled: Successfully assigned esipp-8765/external-local-nodeport-mp5bd to bootstrap-e2e-minion-group-8wdk Nov 26 03:11:21.428: INFO: At 2022-11-26 03:06:18 +0000 UTC - event for external-local-nodeport-mp5bd: {kubelet bootstrap-e2e-minion-group-8wdk} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 03:11:21.428: INFO: At 2022-11-26 03:06:18 +0000 UTC - event for external-local-nodeport-mp5bd: {kubelet bootstrap-e2e-minion-group-8wdk} Created: Created container netexec Nov 26 03:11:21.428: INFO: At 2022-11-26 03:06:18 +0000 UTC - event for external-local-nodeport-mp5bd: {kubelet bootstrap-e2e-minion-group-8wdk} Started: Started container netexec Nov 26 03:11:21.428: INFO: At 2022-11-26 03:06:20 +0000 UTC - event for netserver-0: {default-scheduler } Scheduled: Successfully assigned esipp-8765/netserver-0 to bootstrap-e2e-minion-group-7rps Nov 26 03:11:21.428: INFO: At 2022-11-26 03:06:20 +0000 UTC - event for netserver-1: {default-scheduler } Scheduled: Successfully assigned esipp-8765/netserver-1 to bootstrap-e2e-minion-group-8wdk Nov 26 03:11:21.428: INFO: At 2022-11-26 03:06:20 +0000 UTC - event for netserver-2: {default-scheduler } Scheduled: Successfully assigned esipp-8765/netserver-2 to bootstrap-e2e-minion-group-h8k8 Nov 26 03:11:21.428: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Created: Created container webserver Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Started: Started container webserver Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} Killing: Stopping container webserver Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Created: Created container webserver Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Started: Started container webserver Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Started: Started container webserver Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Created: Created container webserver Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:21 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Pulled: Container image "registry.k8s.io/e2e-test-images/agnhost:2.43" already present on machine Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:22 +0000 UTC - event for netserver-0: {kubelet 
bootstrap-e2e-minion-group-7rps} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:22 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} Killing: Stopping container webserver Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:23 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:26 +0000 UTC - event for netserver-0: {kubelet bootstrap-e2e-minion-group-7rps} BackOff: Back-off restarting failed container webserver in pod netserver-0_esipp-8765(2b794c5a-9fae-4ca9-ad90-23861d1c2bd1) Nov 26 03:11:21.429: INFO: At 2022-11-26 03:06:42 +0000 UTC - event for netserver-2: {kubelet bootstrap-e2e-minion-group-h8k8} BackOff: Back-off restarting failed container webserver in pod netserver-2_esipp-8765(e557b4f3-223a-4d0d-a27c-e4548f8c9d0a) Nov 26 03:11:21.429: INFO: At 2022-11-26 03:07:51 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} Killing: Stopping container webserver Nov 26 03:11:21.429: INFO: At 2022-11-26 03:07:52 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} SandboxChanged: Pod sandbox changed, it will be killed and re-created. Nov 26 03:11:21.429: INFO: At 2022-11-26 03:07:55 +0000 UTC - event for netserver-1: {kubelet bootstrap-e2e-minion-group-8wdk} BackOff: Back-off restarting failed container webserver in pod netserver-1_esipp-8765(c8e7b09c-3cf6-46fe-882a-b060c384559c) Nov 26 03:11:21.429: INFO: At 2022-11-26 03:09:35 +0000 UTC - event for external-local-nodeport-mp5bd: {kubelet bootstrap-e2e-minion-group-8wdk} Unhealthy: Readiness probe failed: Get "http://10.64.3.112:80/hostName": read tcp 10.64.3.1:53542->10.64.3.112:80: read: connection reset by peer Nov 26 03:11:21.429: INFO: At 2022-11-26 03:09:35 +0000 UTC - event for external-local-nodeport-mp5bd: {kubelet bootstrap-e2e-minion-group-8wdk} Killing: Stopping container netexec Nov 26 03:11:21.429: INFO: At 2022-11-26 03:09:36 +0000 UTC - event for external-local-nodeport-mp5bd: {kubelet bootstrap-e2e-minion-group-8wdk} SandboxChanged: Pod sandbox changed, it will be killed and re-created. 
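The repeated "Running (Ready = false)" entries and the final "timed out while waiting for pod esipp-8765/netserver-0 to be running and ready" failure above come from the framework's readiness poll (WaitForPodCondition driven by wait.PollImmediate in the stack trace). As a hedged illustration only — the helper name, the 2-second interval, and the error handling below are assumptions for the sketch, not the framework's actual implementation — the check the test is stuck on reduces to roughly this client-go poll:

```go
package e2esketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodRunningAndReady polls until the named pod reports Phase=Running
// and a Ready condition of True, or the timeout elapses. This mirrors the
// "Running - Ready=true" state the log above is waiting for.
func waitForPodRunningAndReady(c kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			// Treat API errors as transient in this sketch and keep polling.
			return false, nil
		}
		if pod.Status.Phase != v1.PodRunning {
			return false, nil
		}
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
				return true, nil
			}
		}
		// Running but not Ready — the state netserver-0 stays in here,
		// because its webserver container keeps crashing (BackOff events above).
		return false, nil
	})
}
```

In this run the poll can never succeed: the events show the webserver container in netserver-0 being killed, its sandbox recreated, and the container entering back-off, so the Ready condition never flips to True before the 5-minute timeout.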
Nov 26 03:11:21.472: INFO: POD NODE PHASE GRACE CONDITIONS Nov 26 03:11:21.472: INFO: external-local-nodeport-mp5bd bootstrap-e2e-minion-group-8wdk Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:17 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:09:37 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:09:37 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:17 +0000 UTC }] Nov 26 03:11:21.472: INFO: netserver-0 bootstrap-e2e-minion-group-7rps Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:20 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:08:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:08:11 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:20 +0000 UTC }] Nov 26 03:11:21.472: INFO: netserver-1 bootstrap-e2e-minion-group-8wdk Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:20 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:21 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:11:21 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:20 +0000 UTC }] Nov 26 03:11:21.472: INFO: netserver-2 bootstrap-e2e-minion-group-h8k8 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:20 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:10:21 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:10:21 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2022-11-26 03:06:20 +0000 UTC }] Nov 26 03:11:21.472: INFO: Nov 26 03:11:21.695: INFO: Logging node info for node bootstrap-e2e-master Nov 26 03:11:21.737: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-master 6434c658-db87-4566-9960-c594435d7ea0 8115 0 2022-11-26 02:57:42 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } 
{kube-controller-manager Update v1 2022-11-26 02:57:56 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:08:15 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:08:15 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:08:15 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:08:15 +0000 UTC,LastTransitionTime:2022-11-26 02:57:41 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:08:15 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:34.168.169.190,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-master.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5736e6f149167618f71cd530dafef4cc,SystemUUID:5736e6f1-4916-7618-f71c-d530dafef4cc,BootID:aec7342f-3939-425a-bcb1-13b86fb32845,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:135160272,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:11:21.737: INFO: Logging kubelet events for node bootstrap-e2e-master Nov 26 03:11:21.783: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-master Nov 26 03:11:21.830: INFO: kube-controller-manager-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:21.830: INFO: Container kube-controller-manager ready: false, restart count 4 Nov 26 03:11:21.830: INFO: etcd-server-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:21.830: INFO: Container etcd-container ready: true, restart count 2 Nov 26 03:11:21.830: INFO: kube-scheduler-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:21.830: INFO: Container kube-scheduler ready: true, restart count 1 Nov 26 03:11:21.830: INFO: 
l7-lb-controller-bootstrap-e2e-master started at 2022-11-26 02:57:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:21.830: INFO: Container l7-lb-controller ready: true, restart count 6 Nov 26 03:11:21.830: INFO: konnectivity-server-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:21.830: INFO: Container konnectivity-server-container ready: true, restart count 3 Nov 26 03:11:21.830: INFO: etcd-server-events-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:21.830: INFO: Container etcd-container ready: true, restart count 0 Nov 26 03:11:21.830: INFO: kube-addon-manager-bootstrap-e2e-master started at 2022-11-26 02:57:14 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:21.830: INFO: Container kube-addon-manager ready: true, restart count 0 Nov 26 03:11:21.830: INFO: metadata-proxy-v0.1-zcw5j started at 2022-11-26 02:57:48 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:21.830: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:11:21.830: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:11:21.830: INFO: kube-apiserver-bootstrap-e2e-master started at 2022-11-26 02:56:57 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:21.830: INFO: Container kube-apiserver ready: true, restart count 0 Nov 26 03:11:22.025: INFO: Latency metrics for node bootstrap-e2e-master Nov 26 03:11:22.025: INFO: Logging node info for node bootstrap-e2e-minion-group-7rps Nov 26 03:11:22.067: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-7rps d3085899-95ef-4dfa-ae30-feaa3b8cf547 9629 0 2022-11-26 02:57:40 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-7rps kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-7rps topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-1869":"bootstrap-e2e-minion-group-7rps"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:40 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.0.0/24\"":{}}}} } {node-problem-detector Update v1 2022-11-26 03:07:46 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-26 03:10:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:10:52 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.0.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-7rps,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.0.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning 
properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:07:46 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:04 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:04 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:04 +0000 UTC,LastTransitionTime:2022-11-26 02:57:40 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:10:04 +0000 UTC,LastTransitionTime:2022-11-26 02:57:42 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.4,},NodeAddress{Type:ExternalIP,Address:34.145.67.56,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-7rps.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:25dabe18cf1d9ba1fb46b48496913a34,SystemUUID:25dabe18-cf1d-9ba1-fb46-b48496913a34,BootID:cc4a1118-0d4a-44da-b481-07a484a9d681,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 
registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:11:22.067: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-7rps Nov 26 03:11:22.111: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-7rps Nov 26 03:11:22.163: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:02:54 +0000 UTC (0+7 container statuses recorded) Nov 26 03:11:22.163: INFO: Container csi-attacher ready: true, restart count 2 Nov 26 03:11:22.163: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 03:11:22.163: INFO: Container csi-resizer ready: true, restart count 2 Nov 26 03:11:22.163: INFO: Container csi-snapshotter ready: true, restart count 2 Nov 26 03:11:22.163: INFO: Container hostpath ready: true, restart count 2 Nov 26 03:11:22.163: INFO: Container liveness-probe ready: true, restart count 2 Nov 26 03:11:22.163: INFO: Container node-driver-registrar ready: true, restart count 2 Nov 26 03:11:22.163: INFO: csi-mockplugin-0 started at 2022-11-26 03:01:56 +0000 UTC (0+3 container statuses recorded) Nov 26 03:11:22.163: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 03:11:22.163: INFO: Container driver-registrar ready: false, restart count 4 Nov 26 03:11:22.163: INFO: Container mock ready: false, restart count 4 Nov 26 03:11:22.163: INFO: konnectivity-agent-q9wlj started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.163: INFO: Container konnectivity-agent ready: true, restart count 6 Nov 26 03:11:22.163: INFO: pod-d160ca42-832c-4ddf-b18b-b64476b424a2 started at 2022-11-26 03:08:50 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.163: INFO: Container write-pod ready: false, restart count 0 Nov 26 03:11:22.163: INFO: external-local-update-j72qp started at 2022-11-26 03:05:42 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.163: INFO: Container netexec ready: 
true, restart count 5 Nov 26 03:11:22.163: INFO: metrics-server-v0.5.2-867b8754b9-zrdj7 started at 2022-11-26 02:58:13 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:22.163: INFO: Container metrics-server ready: false, restart count 5 Nov 26 03:11:22.163: INFO: Container metrics-server-nanny ready: false, restart count 6 Nov 26 03:11:22.163: INFO: netserver-0 started at 2022-11-26 03:06:20 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.163: INFO: Container webserver ready: false, restart count 5 Nov 26 03:11:22.163: INFO: lb-sourcerange-6wkl4 started at 2022-11-26 03:10:28 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.163: INFO: Container netexec ready: true, restart count 0 Nov 26 03:11:22.163: INFO: metadata-proxy-v0.1-swmbn started at 2022-11-26 02:57:41 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:22.163: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:11:22.163: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:11:22.163: INFO: netserver-0 started at 2022-11-26 03:06:07 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.163: INFO: Container webserver ready: true, restart count 5 Nov 26 03:11:22.163: INFO: kube-proxy-bootstrap-e2e-minion-group-7rps started at 2022-11-26 02:57:40 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.163: INFO: Container kube-proxy ready: false, restart count 6 Nov 26 03:11:22.362: INFO: Latency metrics for node bootstrap-e2e-minion-group-7rps Nov 26 03:11:22.362: INFO: Logging node info for node bootstrap-e2e-minion-group-8wdk Nov 26 03:11:22.424: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-8wdk 99654f41-ed96-4b06-a6c8-1db40d65e751 9271 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-8wdk kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-8wdk topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-mock-csi-mock-volumes-4172":"bootstrap-e2e-minion-group-8wdk"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 03:01:23 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-11-26 03:07:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-11-26 03:09:49 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-8wdk,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not 
read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:09:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:09:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:09:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:09:49 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.5,},NodeAddress{Type:ExternalIP,Address:34.168.227.133,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-8wdk.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:7f88ac496457f212a2a8dc4997301551,SystemUUID:7f88ac49-6457-f212-a2a8-dc4997301551,BootID:691b96ec-62db-4c87-94b3-0915996e979c,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/cpa/cluster-proportional-autoscaler@sha256:fd636b33485c7826fb20ef0688a83ee0910317dbb6c0c6f3ad14661c1db25def registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4],SizeBytes:15209393,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/nginx@sha256:5c99cf6a02adda929b10321dbf4ecfa00d87be9ba4fb456006237d530ab4baa1 registry.k8s.io/e2e-test-images/nginx:1.14-4],SizeBytes:6978614,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:11:22.424: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-8wdk Nov 26 03:11:22.481: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-8wdk Nov 26 03:11:22.569: INFO: pod-6f4844bc-1d15-4f79-abe4-3f079a630918 started at 2022-11-26 03:08:48 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container write-pod ready: false, restart count 0 Nov 26 03:11:22.569: INFO: inclusterclient started at 2022-11-26 03:04:10 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container inclusterclient ready: false, restart count 0 Nov 26 03:11:22.569: INFO: execpod-acceptrr72x started at 2022-11-26 03:10:23 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container agnhost-container ready: true, restart count 1 Nov 26 03:11:22.569: INFO: external-local-nodeport-mp5bd started at 2022-11-26 03:06:17 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: 
INFO: Container netexec ready: true, restart count 1 Nov 26 03:11:22.569: INFO: netserver-1 started at 2022-11-26 03:06:20 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container webserver ready: false, restart count 5 Nov 26 03:11:22.569: INFO: mutability-test-dpfps started at 2022-11-26 03:09:19 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container netexec ready: true, restart count 2 Nov 26 03:11:22.569: INFO: metadata-proxy-v0.1-fzfwr started at 2022-11-26 02:57:45 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:22.569: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:11:22.569: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:11:22.569: INFO: execpod-dropjvg4x started at 2022-11-26 03:10:25 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 03:11:22.569: INFO: konnectivity-agent-v4t28 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container konnectivity-agent ready: false, restart count 6 Nov 26 03:11:22.569: INFO: test-container-pod started at 2022-11-26 03:11:00 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container webserver ready: true, restart count 0 Nov 26 03:11:22.569: INFO: net-tiers-svc-b29kg started at 2022-11-26 03:06:52 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container netexec ready: true, restart count 1 Nov 26 03:11:22.569: INFO: csi-mockplugin-0 started at 2022-11-26 03:08:54 +0000 UTC (0+3 container statuses recorded) Nov 26 03:11:22.569: INFO: Container csi-provisioner ready: true, restart count 2 Nov 26 03:11:22.569: INFO: Container driver-registrar ready: true, restart count 2 Nov 26 03:11:22.569: INFO: Container mock ready: true, restart count 2 Nov 26 03:11:22.569: INFO: kube-dns-autoscaler-5f6455f985-54xp5 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container autoscaler ready: false, restart count 6 Nov 26 03:11:22.569: INFO: coredns-6d97d5ddb-6l6bj started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container coredns ready: false, restart count 6 Nov 26 03:11:22.569: INFO: external-provisioner-7fh56 started at 2022-11-26 03:07:05 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container nfs-provisioner ready: false, restart count 3 Nov 26 03:11:22.569: INFO: l7-default-backend-8549d69d99-kgcjh started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container default-http-backend ready: true, restart count 0 Nov 26 03:11:22.569: INFO: volume-snapshot-controller-0 started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container volume-snapshot-controller ready: true, restart count 6 Nov 26 03:11:22.569: INFO: kube-proxy-bootstrap-e2e-minion-group-8wdk started at 2022-11-26 02:57:44 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container kube-proxy ready: false, restart count 5 Nov 26 03:11:22.569: INFO: netserver-1 started at 2022-11-26 03:06:08 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:22.569: INFO: Container webserver ready: true, restart count 5 Nov 26 03:11:22.860: INFO: Latency metrics for node bootstrap-e2e-minion-group-8wdk Nov 26 03:11:22.860: INFO: Logging node info for node 
bootstrap-e2e-minion-group-h8k8 Nov 26 03:11:22.911: INFO: Node Info: &Node{ObjectMeta:{bootstrap-e2e-minion-group-h8k8 d6f1c0c9-6047-409c-8381-ed8be6457456 9592 0 2022-11-26 02:57:44 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:bootstrap-e2e-minion-group-h8k8 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.hostpath.csi/node:bootstrap-e2e-minion-group-h8k8 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[csi.volume.kubernetes.io/nodeid:{"csi-hostpath-multivolume-388":"bootstrap-e2e-minion-group-h8k8","csi-hostpath-provisioning-5570":"bootstrap-e2e-minion-group-h8k8"} node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-11-26 02:57:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-11-26 02:57:45 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.2.0/24\"":{}}}} } {node-problem-detector Update v1 2022-11-26 03:07:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 2022-11-26 03:09:10 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-11-26 03:10:46 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:csi.volume.kubernetes.io/nodeid":{}},"f:labels":{"f:topology.hostpath.csi/node":{}}},"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.2.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gce-upgrade/us-west1-b/bootstrap-e2e-minion-group-h8k8,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.2.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-11-26 03:07:50 +0000 UTC,LastTransitionTime:2022-11-26 02:57:48 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-11-26 02:57:56 +0000 UTC,LastTransitionTime:2022-11-26 02:57:56 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:01 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:01 +0000 
UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-11-26 03:10:01 +0000 UTC,LastTransitionTime:2022-11-26 02:57:44 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-11-26 03:10:01 +0000 UTC,LastTransitionTime:2022-11-26 02:57:45 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.168.177.45,},NodeAddress{Type:InternalDNS,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},NodeAddress{Type:Hostname,Address:bootstrap-e2e-minion-group-h8k8.c.k8s-jkns-gce-upgrade.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:5d45caccbe73d6049c0c7b580c44ebb3,SystemUUID:5d45cacc-be73-d604-9c0c-7b580c44ebb3,BootID:f77edcc1-2ac4-4561-a5dd-42b928bec3f2,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.7.0-beta.0-149-gd06318622,KubeletVersion:v1.27.0-alpha.0.50+70617042976dc1,KubeProxyVersion:v1.27.0-alpha.0.50+70617042976dc1,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/e2e-test-images/jessie-dnsutils@sha256:24aaf2626d6b27864c29de2097e8bbb840b3a414271bf7c8995e431e47d8408e registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7],SizeBytes:112030336,},ContainerImage{Names:[registry.k8s.io/sig-storage/nfs-provisioner@sha256:e943bb77c7df05ebdc8c7888b2db289b13bf9f012d6a3a5a74f14d4d5743d439 registry.k8s.io/sig-storage/nfs-provisioner:v3.0.1],SizeBytes:90632047,},ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.50_70617042976dc1],SizeBytes:67201736,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/agnhost@sha256:16bbf38c463a4223d8cfe4da12bc61010b082a79b4bb003e2d3ba3ece5dd5f9e registry.k8s.io/e2e-test-images/agnhost:2.43],SizeBytes:51706353,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/httpd@sha256:148b022f5c5da426fc2f3c14b5c0867e58ef05961510c84749ac1fddcb0fef22 registry.k8s.io/e2e-test-images/httpd:2.4.38-4],SizeBytes:40764257,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-provisioner@sha256:ee3b525d5b89db99da3b8eb521d9cd90cb6e9ef0fbb651e98bb37be78d36b5b8 registry.k8s.io/sig-storage/csi-provisioner:v3.3.0],SizeBytes:25491225,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-resizer@sha256:425d8f1b769398127767b06ed97ce62578a3179bcb99809ce93a1649e025ffe7 registry.k8s.io/sig-storage/csi-resizer:v1.6.0],SizeBytes:24148884,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-snapshotter@sha256:291334908ddf71a4661fd7f6d9d97274de8a5378a2b6fdfeb2ce73414a34f82f registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0],SizeBytes:23881995,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-attacher@sha256:9a685020911e2725ad019dbce6e4a5ab93d51e3d4557f115e64343345e05781b registry.k8s.io/sig-storage/csi-attacher:v4.0.0],SizeBytes:23847201,},ContainerImage{Names:[registry.k8s.io/sig-storage/hostpathplugin@sha256:92257881c1d6493cf18299a24af42330f891166560047902b8d431fb66b01af5 
registry.k8s.io/sig-storage/hostpathplugin:v1.9.0],SizeBytes:18758628,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/sig-storage/csi-node-driver-registrar@sha256:0103eee7c35e3e0b5cd8cdca9850dc71c793cdeb6669d8be7a89440da2d06ae4 registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1],SizeBytes:9133109,},ContainerImage{Names:[registry.k8s.io/sig-storage/livenessprobe@sha256:933940f13b3ea0abc62e656c1aa5c5b47c04b15d71250413a6b821bd0c58b94e registry.k8s.io/sig-storage/livenessprobe:v2.7.0],SizeBytes:8688564,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:c318242786b139d18676b1c09a0ad7f15fc17f8f16a5b2e625cd0dc8c9703daf registry.k8s.io/e2e-test-images/busybox:1.29-2],SizeBytes:732424,},ContainerImage{Names:[registry.k8s.io/e2e-test-images/busybox@sha256:2e0f836850e09b8b7cc937681d6194537a09fbd5f6b9e08f4d646a85128e8937 registry.k8s.io/e2e-test-images/busybox:1.29-4],SizeBytes:731990,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},ContainerImage{Names:[registry.k8s.io/pause@sha256:9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d registry.k8s.io/pause:3.8],SizeBytes:311286,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Nov 26 03:11:22.911: INFO: Logging kubelet events for node bootstrap-e2e-minion-group-h8k8 Nov 26 03:11:22.955: INFO: Logging pods the kubelet thinks is on node bootstrap-e2e-minion-group-h8k8 Nov 26 03:11:23.025: INFO: netserver-2 started at 2022-11-26 03:06:20 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:23.025: INFO: Container webserver ready: true, restart count 3 Nov 26 03:11:23.025: INFO: csi-mockplugin-0 started at 2022-11-26 03:03:22 +0000 UTC (0+4 container statuses recorded) Nov 26 03:11:23.025: INFO: Container busybox ready: false, restart count 4 Nov 26 03:11:23.025: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 03:11:23.025: INFO: Container driver-registrar ready: false, restart count 4 Nov 26 03:11:23.025: INFO: Container mock ready: false, restart count 4 Nov 26 03:11:23.025: INFO: csi-mockplugin-0 started at 2022-11-26 03:02:45 +0000 UTC (0+4 container statuses recorded) Nov 26 03:11:23.025: INFO: Container busybox ready: false, restart count 3 Nov 26 03:11:23.025: INFO: Container csi-provisioner ready: false, restart count 4 Nov 26 03:11:23.025: INFO: Container driver-registrar ready: true, restart count 5 Nov 26 03:11:23.025: INFO: Container mock ready: true, restart count 5 Nov 26 03:11:23.025: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:08:47 +0000 UTC (0+7 container statuses recorded) Nov 26 03:11:23.025: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container csi-snapshotter 
ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container hostpath ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 03:11:23.025: INFO: hostexec-bootstrap-e2e-minion-group-h8k8-q7bst started at 2022-11-26 03:10:20 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:23.025: INFO: Container agnhost-container ready: true, restart count 0 Nov 26 03:11:23.025: INFO: kube-proxy-bootstrap-e2e-minion-group-h8k8 started at 2022-11-26 02:57:44 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:23.025: INFO: Container kube-proxy ready: true, restart count 6 Nov 26 03:11:23.025: INFO: metadata-proxy-v0.1-9vqbj started at 2022-11-26 02:57:45 +0000 UTC (0+2 container statuses recorded) Nov 26 03:11:23.025: INFO: Container metadata-proxy ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Nov 26 03:11:23.025: INFO: coredns-6d97d5ddb-x27zq started at 2022-11-26 02:58:05 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:23.025: INFO: Container coredns ready: false, restart count 6 Nov 26 03:11:23.025: INFO: netserver-2 started at 2022-11-26 03:06:08 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:23.025: INFO: Container webserver ready: false, restart count 3 Nov 26 03:11:23.025: INFO: pod-subpath-test-preprovisionedpv-2cvw started at 2022-11-26 03:10:32 +0000 UTC (1+2 container statuses recorded) Nov 26 03:11:23.025: INFO: Init container init-volume-preprovisionedpv-2cvw ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container test-container-subpath-preprovisionedpv-2cvw ready: true, restart count 2 Nov 26 03:11:23.025: INFO: Container test-container-volume-preprovisionedpv-2cvw ready: true, restart count 2 Nov 26 03:11:23.025: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:08:55 +0000 UTC (0+7 container statuses recorded) Nov 26 03:11:23.025: INFO: Container csi-attacher ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container csi-provisioner ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container csi-resizer ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container csi-snapshotter ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container hostpath ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container liveness-probe ready: true, restart count 0 Nov 26 03:11:23.025: INFO: Container node-driver-registrar ready: true, restart count 0 Nov 26 03:11:23.025: INFO: konnectivity-agent-t5nvv started at 2022-11-26 02:57:56 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:23.025: INFO: Container konnectivity-agent ready: false, restart count 5 Nov 26 03:11:23.025: INFO: back-off-cap started at 2022-11-26 02:59:37 +0000 UTC (0+1 container statuses recorded) Nov 26 03:11:23.025: INFO: Container back-off-cap ready: false, restart count 7 Nov 26 03:11:23.025: INFO: csi-hostpathplugin-0 started at 2022-11-26 03:03:12 +0000 UTC (0+7 container statuses recorded) Nov 26 03:11:23.025: INFO: Container csi-attacher ready: false, restart count 5 Nov 26 03:11:23.025: INFO: Container csi-provisioner ready: false, restart count 5 Nov 26 03:11:23.025: INFO: Container csi-resizer ready: false, restart count 5 Nov 26 03:11:23.025: INFO: Container csi-snapshotter ready: false, restart count 5 Nov 26 03:11:23.025: INFO: Container hostpath ready: false, restart count 5 Nov 26 03:11:23.025: INFO: Container 
liveness-probe ready: false, restart count 5 Nov 26 03:11:23.025: INFO: Container node-driver-registrar ready: false, restart count 5 Nov 26 03:11:23.281: INFO: Latency metrics for node bootstrap-e2e-minion-group-h8k8 [DeferCleanup (Each)] [sig-network] LoadBalancers ESIPP [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "esipp-8765" for this suite. 11/26/22 03:11:23.281
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-network\]\sLoadBalancers\sESIPP\s\[Slow\]\sshould\swork\sfrom\spods$'
test/e2e/network/loadbalancer.go:1476 k8s.io/kubernetes/test/e2e/network.glob..func20.6() test/e2e/network/loadbalancer.go:1476 +0xabd
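The goroutine dumps recorded for this spec (wait.PollImmediate -> RunHostCmd -> loadbalancer.go:1466-1468) show the test repeatedly running kubectl exec with a curl against the load balancer's /clientip endpoint until it answers or the poll times out. Below is a minimal, self-contained Go sketch of that polling pattern, written against os/exec rather than the e2e framework; the namespace, pod name, and LB IP are copied from the log above, while the poll interval and overall deadline are illustrative assumptions, not the framework's actual values.

// poll_clientip_sketch.go - hedged sketch of the retry loop visible in the
// goroutine dumps; NOT the framework's implementation.
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// hitClientIP runs `kubectl exec <pod> -- curl <lbIP>:80/clientip` once and
// reports whether the endpoint returned a body. Errors are logged and
// swallowed so the caller can keep polling, mirroring the
// "retry until timeout" lines in the log.
func hitClientIP(namespace, pod, lbIP string) bool {
	cmd := exec.Command("kubectl",
		"--namespace="+namespace, "exec", pod, "--",
		"/bin/sh", "-x", "-c",
		fmt.Sprintf("curl -q -s --connect-timeout 30 %s:80/clientip", lbIP))
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Printf("got err: %v, retry until timeout\n", err)
		return false
	}
	fmt.Printf("stdout: %q\n", out)
	return len(out) > 0 // expect the caller's client IP in the response body
}

func main() {
	const (
		interval = 2 * time.Second // assumed poll interval
		timeout  = 5 * time.Minute // assumed overall deadline
	)
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		// Pod, namespace, and LB address taken from the log excerpt above.
		if hitClientIP("esipp-5196", "pause-pod-deployment-648855d779-zz4dx",
			"104.196.242.126") {
			fmt.Println("load balancer reachable from pod")
			return
		}
		time.Sleep(interval)
	}
	fmt.Println("timed out waiting for load balancer response")
}

In the failing run below, every iteration of this kind of loop errors out with "error dialing backend: No agent available" (the konnectivity agent path), so the step never succeeds before the spec deadline.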
[BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 11/26/22 03:20:34.49 Nov 26 03:20:34.490: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename esipp 11/26/22 03:20:34.492 STEP: Waiting for a default service account to be provisioned in namespace 11/26/22 03:20:34.822 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 11/26/22 03:20:34.952 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-network] LoadBalancers ESIPP [Slow] test/e2e/network/loadbalancer.go:1250 [It] should work from pods test/e2e/network/loadbalancer.go:1422 STEP: creating a service esipp-5196/external-local-pods with type=LoadBalancer 11/26/22 03:20:35.183 STEP: setting ExternalTrafficPolicy=Local 11/26/22 03:20:35.183 STEP: waiting for loadbalancer for service esipp-5196/external-local-pods 11/26/22 03:20:35.406 Nov 26 03:20:35.406: INFO: Waiting up to 15m0s for service "external-local-pods" to have a LoadBalancer STEP: creating a pod to be part of the service external-local-pods 11/26/22 03:21:11.57 Nov 26 03:21:11.671: INFO: Waiting up to 2m0s for 1 pods to be created Nov 26 03:21:11.773: INFO: Found 0/1 pods - will retry Nov 26 03:21:13.885: INFO: Found all 1 pods Nov 26 03:21:13.885: INFO: Waiting up to 2m0s for 1 pods to be running and ready: [external-local-pods-td9g8] Nov 26 03:21:13.885: INFO: Waiting up to 2m0s for pod "external-local-pods-td9g8" in namespace "esipp-5196" to be "running and ready" Nov 26 03:21:13.966: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 81.574492ms Nov 26 03:21:13.966: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:16.024: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 2.139229071s Nov 26 03:21:16.024: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:18.017: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 4.132484836s Nov 26 03:21:18.017: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:20.097: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 6.212797648s Nov 26 03:21:20.097: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:22.109: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 8.224798069s Nov 26 03:21:22.109: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:24.067: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 10.18248609s Nov 26 03:21:24.067: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:26.032: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 12.147125503s Nov 26 03:21:26.032: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:28.019: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 14.134528296s Nov 26 03:21:28.019: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:30.052: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 16.167257538s Nov 26 03:21:30.052: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:32.106: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 18.221664545s Nov 26 03:21:32.106: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:34.074: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 20.189457416s Nov 26 03:21:34.074: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:36.047: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 22.162323392s Nov 26 03:21:36.047: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:38.024: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 24.139120253s Nov 26 03:21:38.024: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:40.022: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 26.137360583s Nov 26 03:21:40.022: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:42.022: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 28.137126s Nov 26 03:21:42.022: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:44.025: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 30.140130187s Nov 26 03:21:44.025: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on '' to be 'Running' but was 'Pending' Nov 26 03:21:46.034: INFO: Pod "external-local-pods-td9g8": Phase="Pending", Reason="", readiness=false. Elapsed: 32.14971405s Nov 26 03:21:46.034: INFO: Error evaluating pod condition running and ready: want pod 'external-local-pods-td9g8' on 'bootstrap-e2e-minion-group-h8k8' to be 'Running' but was 'Pending' Nov 26 03:21:48.020: INFO: Pod "external-local-pods-td9g8": Phase="Running", Reason="", readiness=true. Elapsed: 34.135643088s Nov 26 03:21:48.020: INFO: Pod "external-local-pods-td9g8" satisfied condition "running and ready" Nov 26 03:21:48.020: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [external-local-pods-td9g8] STEP: waiting for loadbalancer for service esipp-5196/external-local-pods 11/26/22 03:21:48.02 Nov 26 03:21:48.020: INFO: Waiting up to 15m0s for service "external-local-pods" to have a LoadBalancer STEP: Creating pause pod deployment to make sure, pausePods are in desired state 11/26/22 03:21:48.086 Nov 26 03:21:48.267: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:0, Replicas:0, UpdatedReplicas:0, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:0, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:time.Date(2022, time.November, 26, 3, 21, 48, 0, time.Local), LastTransitionTime:time.Date(2022, time.November, 26, 3, 21, 48, 0, time.Local), Reason:"NewReplicaSetCreated", Message:"Created new replica set \"pause-pod-deployment-648855d779\""}}, CollisionCount:(*int32)(nil)} Nov 26 03:21:50.511: INFO: Waiting up to 5m0s curl 104.196.242.126:80/clientip STEP: Hitting external lb 104.196.242.126 from pod pause-pod-deployment-648855d779-zz4dx on node bootstrap-e2e-minion-group-h8k8 11/26/22 03:21:50.572 Nov 26 03:21:50.572: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:21:50.971: INFO: rc: 1 Nov 26 03:21:50.971: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:21:52.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:21:53.422: INFO: rc: 1 Nov 26 03:21:53.422: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:21:54.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:21:55.577: INFO: rc: 1 Nov 26 03:21:55.577: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: 
Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:21:56.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:21:57.391: INFO: rc: 1 Nov 26 03:21:57.391: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:21:58.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:21:59.463: INFO: rc: 1 Nov 26 03:21:59.463: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:22:00.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:22:01.465: INFO: rc: 1 Nov 26 03:22:01.465: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:22:02.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:22:03.412: INFO: rc: 1 Nov 26 03:22:03.412: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:22:04.971: INFO: Running 
'/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:22:05.503: INFO: rc: 1 Nov 26 03:22:05.503: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:22:06.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:22:07.484: INFO: rc: 1 Nov 26 03:22:07.484: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:22:08.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:22:39.419: INFO: rc: 1 Nov 26 03:22:39.420: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: context deadline exceeded: connection error: desc = "transport: Error while dialing dial unix /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket: connect: no such file or directory" error: exit status 1, retry until timeout Nov 26 03:22:40.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:23:11.344: INFO: rc: 1 Nov 26 03:23:11.344: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: context deadline exceeded: connection error: desc = "transport: Error while dialing dial unix 
/etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket: connect: no such file or directory" error: exit status 1, retry until timeout Nov 26 03:23:12.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:23:43.436: INFO: rc: 1 Nov 26 03:23:43.436: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: context deadline exceeded: connection error: desc = "transport: Error while dialing dial unix /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket: connect: no such file or directory" error: exit status 1, retry until timeout Nov 26 03:23:44.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:24:15.440: INFO: rc: 1 Nov 26 03:24:15.441: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: context deadline exceeded: connection error: desc = "transport: Error while dialing dial unix /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket: connect: no such file or directory" error: exit status 1, retry until timeout Nov 26 03:24:16.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:24:47.341: INFO: rc: 1 Nov 26 03:24:47.341: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: context deadline exceeded: connection error: desc = "transport: Error while dialing dial unix /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket: connect: no such file or directory" error: exit status 1, retry until timeout Nov 26 03:24:48.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 
03:25:29.509: INFO: stderr: "+ curl -q -s --connect-timeout 30 104.196.242.126:80/clientip\n" Nov 26 03:25:29.509: INFO: stdout: "" Nov 26 03:25:30.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:31.442: INFO: rc: 1 Nov 26 03:25:31.442: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:32.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:33.616: INFO: rc: 1 Nov 26 03:25:33.616: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:34.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' ------------------------------ Progress Report for Ginkgo Process #2 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 5m0.694s) test/e2e/network/loadbalancer.go:1422 In [It] (Node Runtime: 5m0.001s) test/e2e/network/loadbalancer.go:1422 At [By Step] Hitting external lb 104.196.242.126 from pod pause-pod-deployment-648855d779-zz4dx on node bootstrap-e2e-minion-group-h8k8 (Step Runtime: 3m44.612s) test/e2e/network/loadbalancer.go:1466 Spec Goroutine goroutine 4602 [select] k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc0027b6000?, 0x0?}) test/e2e/framework/kubectl/builder.go:125 k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...) test/e2e/framework/kubectl/builder.go:107 k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc004b52cb0?, 0x1?}, {0xc005c4dad8?, 0x101010020?, 0x0?}) test/e2e/framework/kubectl/builder.go:154 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...) 
test/e2e/framework/pod/output/output.go:82 > k8s.io/kubernetes/test/e2e/network.glob..func20.6.3() test/e2e/network/loadbalancer.go:1468 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0001b0000?}, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0001b0000}, 0xc004dbe528, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0001b0000}, 0xb0?, 0x2fd9d05?, 0x28?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0001b0000}, 0x0?, 0xc005c4dd00?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc000e3a380?, 0x7a?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.glob..func20.6() test/e2e/network/loadbalancer.go:1467 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0008cdc80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:25:35.453: INFO: rc: 1 Nov 26 03:25:35.453: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:36.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:37.412: INFO: rc: 1 Nov 26 03:25:37.412: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:38.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:39.719: INFO: rc: 1 Nov 26 03:25:39.719: INFO: got err: error running 
/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:40.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:41.531: INFO: rc: 1 Nov 26 03:25:41.531: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:42.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:43.590: INFO: rc: 1 Nov 26 03:25:43.590: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:44.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:45.701: INFO: rc: 1 Nov 26 03:25:45.701: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:46.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:47.381: INFO: rc: 1 Nov 26 03:25:47.381: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec 
pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:48.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:49.499: INFO: rc: 1 Nov 26 03:25:49.499: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:50.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:51.485: INFO: rc: 1 Nov 26 03:25:51.485: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:52.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:53.699: INFO: rc: 1 Nov 26 03:25:53.699: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:54.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' ------------------------------ Progress Report for Ginkgo Process #2 Automatically polling progress: [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 5m20.697s) test/e2e/network/loadbalancer.go:1422 In [It] (Node Runtime: 5m20.004s) test/e2e/network/loadbalancer.go:1422 At [By Step] Hitting external lb 104.196.242.126 from pod pause-pod-deployment-648855d779-zz4dx on node bootstrap-e2e-minion-group-h8k8 (Step Runtime: 
4m4.615s) test/e2e/network/loadbalancer.go:1466 Spec Goroutine goroutine 4602 [select] k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc000d01ce0?, 0x0?}) test/e2e/framework/kubectl/builder.go:125 k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...) test/e2e/framework/kubectl/builder.go:107 k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc004b52cb0?, 0x1?}, {0xc005c4dad8?, 0x101010020?, 0x0?}) test/e2e/framework/kubectl/builder.go:154 k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...) test/e2e/framework/pod/output/output.go:82 > k8s.io/kubernetes/test/e2e/network.glob..func20.6.3() test/e2e/network/loadbalancer.go:1468 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0}) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0001b0000?}, 0x2?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0001b0000}, 0xc004dbe528, 0x2fdb16a?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0001b0000}, 0xb0?, 0x2fd9d05?, 0x28?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0001b0000}, 0x0?, 0xc005c4dd00?, 0x262a967?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528 k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc000e3a380?, 0x7a?, 0x0?) vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514 > k8s.io/kubernetes/test/e2e/network.glob..func20.6() test/e2e/network/loadbalancer.go:1467 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0008cdc80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Nov 26 03:25:55.484: INFO: rc: 1 Nov 26 03:25:55.484: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout Nov 26 03:25:56.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip' Nov 26 03:25:57.515: INFO: rc: 1 Nov 26 03:25:57.515: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command 
Nov 26 03:25:55.484: INFO: rc: 1
Nov 26 03:25:55.484: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:25:56.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:25:57.515: INFO: rc: 1
Nov 26 03:25:57.515: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:25:58.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:25:59.937: INFO: rc: 1
Nov 26 03:25:59.937: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:00.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:01.473: INFO: rc: 1
Nov 26 03:26:01.473: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:02.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:03.450: INFO: rc: 1
Nov 26 03:26:03.450: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:04.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:05.754: INFO: rc: 1
Nov 26 03:26:05.754: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:06.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:07.363: INFO: rc: 1
Nov 26 03:26:07.363: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:08.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:09.601: INFO: rc: 1
Nov 26 03:26:09.601: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:10.972: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:11.352: INFO: rc: 1
Nov 26 03:26:11.353: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:12.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:13.396: INFO: rc: 1
Nov 26 03:26:13.396: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:14.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
------------------------------
Progress Report for Ginkgo Process #2
Automatically polling progress:
  [sig-network] LoadBalancers ESIPP [Slow] should work from pods (Spec Runtime: 5m40.7s)
    test/e2e/network/loadbalancer.go:1422
    In [It] (Node Runtime: 5m40.007s)
      test/e2e/network/loadbalancer.go:1422
      At [By Step] Hitting external lb 104.196.242.126 from pod pause-pod-deployment-648855d779-zz4dx on node bootstrap-e2e-minion-group-h8k8 (Step Runtime: 4m24.618s)
        test/e2e/network/loadbalancer.go:1466
Spec Goroutine
goroutine 4602 [select]
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.ExecWithFullOutput({0xc000e93600?, 0x0?})
    test/e2e/framework/kubectl/builder.go:125
k8s.io/kubernetes/test/e2e/framework/kubectl.KubectlBuilder.Exec(...)
    test/e2e/framework/kubectl/builder.go:107
k8s.io/kubernetes/test/e2e/framework/kubectl.RunKubectl({0xc004b52cb0?, 0x1?}, {0xc005c4dad8?, 0x101010020?, 0x0?})
    test/e2e/framework/kubectl/builder.go:154
k8s.io/kubernetes/test/e2e/framework/pod/output.RunHostCmd(...)
    test/e2e/framework/pod/output/output.go:82
> k8s.io/kubernetes/test/e2e/network.glob..func20.6.3()
    test/e2e/network/loadbalancer.go:1468
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.ConditionFunc.WithContext.func1({0x2742871, 0x0})
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:222
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.runConditionWithCrashProtectionWithContext({0x7fe0bc8?, 0xc0001b0000?}, 0x2?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:235
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.WaitForWithContext({0x7fe0bc8, 0xc0001b0000}, 0xc004dbe528, 0x2fdb16a?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:662
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.poll({0x7fe0bc8, 0xc0001b0000}, 0xb0?, 0x2fd9d05?, 0x28?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:596
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediateWithContext({0x7fe0bc8, 0xc0001b0000}, 0x0?, 0xc005c4dd00?, 0x262a967?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:528
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait.PollImmediate(0xc000e3a380?, 0x7a?, 0x0?)
    vendor/k8s.io/apimachinery/pkg/util/wait/wait.go:514
> k8s.io/kubernetes/test/e2e/network.glob..func20.6()
    test/e2e/network/loadbalancer.go:1467
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc0008cdc80})
    vendor/github.com/onsi/ginkgo/v2/internal/node.go:449
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2()
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode
    vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738
------------------------------
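For reference, what this step is waiting for is the load balancer's view of the client: the /clientip path (presumably backed by an agnhost netexec pod) is expected to echo the caller's "ip:port", and the ESIPP test then checks the IP half against the expected source address to verify source-IP preservation. Below is a small, hypothetical example of splitting such a response; the response string is made up for illustration and would really come from the exec'd curl's stdout once it stops failing.

// Hypothetical handling of a successful /clientip response ("ip:port").
package main

import (
    "fmt"
    "net"
)

func main() {
    resp := "10.64.3.15:52144" // illustrative curl output, not from this log

    ip, port, err := net.SplitHostPort(resp)
    if err != nil {
        fmt.Printf("unexpected /clientip response %q: %v\n", resp, err)
        return
    }
    // The ip half is what a source-IP preservation check would compare.
    fmt.Printf("client ip=%s port=%s\n", ip, port)
}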
Nov 26 03:26:15.321: INFO: rc: 1
Nov 26 03:26:15.321: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:16.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:17.309: INFO: rc: 1
Nov 26 03:26:17.309: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:18.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:19.394: INFO: rc: 1
Nov 26 03:26:19.394: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:20.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:21.320: INFO: rc: 1
Nov 26 03:26:21.320: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:22.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:23.386: INFO: rc: 1
Nov 26 03:26:23.386: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:24.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:25.313: INFO: rc: 1
Nov 26 03:26:25.313: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: error dialing backend: No agent available error: exit status 1, retry until timeout
Nov 26 03:26:26.971: INFO: Running '/workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip'
Nov 26 03:26:27.313: INFO: rc: 1
Nov 26 03:26:27.314: INFO: got err: error running /workspace/github.com/containerd/containerd/kubernetes/platforms/linux/amd64/kubectl --server=https://34.168.169.190 --kubeconfig=/workspace/.kube/config --namespace=esipp-5196 exec pause-pod-deployment-648855d779-zz4dx -- /bin/sh -x -c curl -q -s --connect-timeout 30 104.196.242.126:80/clientip: Command stdout: stderr: Error from server: er